From b562001d190876cbaa79bb9f5b12b6b4a54be962 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 21 Jun 2022 18:29:45 +0000 Subject: [PATCH] feat: add model_version_id to UploadModelResponse in aiplatform v1 model_service.proto PiperOrigin-RevId: 455486702 Source-Link: https://github.com/googleapis/googleapis/commit/f2e55382d437b3f7463176c95cedc1293c5971fb Source-Link: https://github.com/googleapis/googleapis-gen/commit/15200223b157cb9e76c7923bc387dd6e9c0a83bd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTUyMDAyMjNiMTU3Y2I5ZTc2Yzc5MjNiYzM4N2RkNmU5YzBhODNiZCJ9 --- owl-bot-staging/v1/.coveragerc | 17 + owl-bot-staging/v1/.flake8 | 33 + owl-bot-staging/v1/MANIFEST.in | 2 + owl-bot-staging/v1/README.rst | 49 + .../v1/docs/aiplatform_v1/dataset_service.rst | 10 + .../docs/aiplatform_v1/endpoint_service.rst | 10 + .../featurestore_online_serving_service.rst | 6 + .../aiplatform_v1/featurestore_service.rst | 10 + .../aiplatform_v1/index_endpoint_service.rst | 10 + .../v1/docs/aiplatform_v1/index_service.rst | 10 + .../v1/docs/aiplatform_v1/job_service.rst | 10 + .../docs/aiplatform_v1/metadata_service.rst | 10 + .../docs/aiplatform_v1/migration_service.rst | 10 + .../v1/docs/aiplatform_v1/model_service.rst | 10 + .../docs/aiplatform_v1/pipeline_service.rst | 10 + .../docs/aiplatform_v1/prediction_service.rst | 6 + .../v1/docs/aiplatform_v1/services.rst | 20 + .../aiplatform_v1/specialist_pool_service.rst | 10 + .../aiplatform_v1/tensorboard_service.rst | 10 + .../v1/docs/aiplatform_v1/types.rst | 7 + .../v1/docs/aiplatform_v1/vizier_service.rst | 10 + owl-bot-staging/v1/docs/conf.py | 376 + .../v1/docs/definition_v1/services.rst | 4 + .../v1/docs/definition_v1/types.rst | 7 + owl-bot-staging/v1/docs/index.rst | 7 + .../v1/docs/instance_v1/services.rst | 4 + owl-bot-staging/v1/docs/instance_v1/types.rst | 7 + .../v1/docs/params_v1/services.rst | 4 + owl-bot-staging/v1/docs/params_v1/types.rst | 7 + .../v1/docs/prediction_v1/services.rst | 4 + 
.../v1/docs/prediction_v1/types.rst | 7 + .../v1/google/cloud/aiplatform/__init__.py | 953 + .../v1/google/cloud/aiplatform/py.typed | 2 + .../v1/schema/predict/instance/__init__.py | 37 + .../v1/schema/predict/instance/py.typed | 2 + .../v1/schema/predict/instance_v1/__init__.py | 38 + .../predict/instance_v1/gapic_metadata.json | 7 + .../v1/schema/predict/instance_v1/py.typed | 2 + .../predict/instance_v1/services/__init__.py | 15 + .../predict/instance_v1/types/__init__.py | 54 + .../instance_v1/types/image_classification.py | 56 + .../types/image_object_detection.py | 56 + .../instance_v1/types/image_segmentation.py | 50 + .../instance_v1/types/text_classification.py | 49 + .../instance_v1/types/text_extraction.py | 62 + .../instance_v1/types/text_sentiment.py | 49 + .../types/video_action_recognition.py | 73 + .../instance_v1/types/video_classification.py | 73 + .../types/video_object_tracking.py | 73 + .../v1/schema/predict/params/__init__.py | 31 + .../v1/schema/predict/params/py.typed | 2 + .../v1/schema/predict/params_v1/__init__.py | 32 + .../predict/params_v1/gapic_metadata.json | 7 + .../v1/schema/predict/params_v1/py.typed | 2 + .../predict/params_v1/services/__init__.py | 15 + .../predict/params_v1/types/__init__.py | 42 + .../params_v1/types/image_classification.py | 52 + .../params_v1/types/image_object_detection.py | 53 + .../params_v1/types/image_segmentation.py | 45 + .../types/video_action_recognition.py | 53 + .../params_v1/types/video_classification.py | 95 + .../params_v1/types/video_object_tracking.py | 61 + .../v1/schema/predict/prediction/__init__.py | 39 + .../v1/schema/predict/prediction/py.typed | 2 + .../schema/predict/prediction_v1/__init__.py | 40 + .../predict/prediction_v1/gapic_metadata.json | 7 + .../v1/schema/predict/prediction_v1/py.typed | 2 + .../prediction_v1/services/__init__.py | 15 + .../predict/prediction_v1/types/__init__.py | 58 + .../prediction_v1/types/classification.py | 57 + .../types/image_object_detection.py | 73 
+ .../prediction_v1/types/image_segmentation.py | 62 + .../types/tabular_classification.py | 52 + .../prediction_v1/types/tabular_regression.py | 53 + .../prediction_v1/types/text_extraction.py | 78 + .../prediction_v1/types/text_sentiment.py | 48 + .../types/video_action_recognition.py | 85 + .../types/video_classification.py | 103 + .../types/video_object_tracking.py | 145 + .../schema/trainingjob/definition/__init__.py | 69 + .../v1/schema/trainingjob/definition/py.typed | 2 + .../trainingjob/definition_v1/__init__.py | 70 + .../definition_v1/gapic_metadata.json | 7 + .../schema/trainingjob/definition_v1/py.typed | 2 + .../definition_v1/services/__init__.py | 15 + .../definition_v1/types/__init__.py | 90 + .../types/automl_image_classification.py | 158 + .../types/automl_image_object_detection.py | 139 + .../types/automl_image_segmentation.py | 133 + .../definition_v1/types/automl_tables.py | 529 + .../types/automl_text_classification.py | 58 + .../types/automl_text_extraction.py | 49 + .../types/automl_text_sentiment.py | 67 + .../types/automl_video_action_recognition.py | 66 + .../types/automl_video_classification.py | 65 + .../types/automl_video_object_tracking.py | 68 + .../export_evaluated_data_items_config.py | 57 + .../v1/google/cloud/aiplatform_v1/__init__.py | 954 + .../cloud/aiplatform_v1/gapic_metadata.json | 2099 ++ .../v1/google/cloud/aiplatform_v1/py.typed | 2 + .../cloud/aiplatform_v1/services/__init__.py | 15 + .../services/dataset_service/__init__.py | 22 + .../services/dataset_service/async_client.py | 1989 + .../services/dataset_service/client.py | 2210 ++ .../services/dataset_service/pagers.py | 384 + .../dataset_service/transports/__init__.py | 33 + .../dataset_service/transports/base.py | 381 + .../dataset_service/transports/grpc.py | 728 + .../transports/grpc_asyncio.py | 727 + .../services/endpoint_service/__init__.py | 22 + .../services/endpoint_service/async_client.py | 1729 + .../services/endpoint_service/client.py | 1948 + 
.../services/endpoint_service/pagers.py | 140 + .../endpoint_service/transports/__init__.py | 33 + .../endpoint_service/transports/base.py | 338 + .../endpoint_service/transports/grpc.py | 651 + .../transports/grpc_asyncio.py | 650 + .../__init__.py | 22 + .../async_client.py | 1058 + .../client.py | 1252 + .../transports/__init__.py | 33 + .../transports/base.py | 259 + .../transports/grpc.py | 502 + .../transports/grpc_asyncio.py | 501 + .../services/featurestore_service/__init__.py | 22 + .../featurestore_service/async_client.py | 3377 ++ .../services/featurestore_service/client.py | 3589 ++ .../services/featurestore_service/pagers.py | 505 + .../transports/__init__.py | 33 + .../featurestore_service/transports/base.py | 523 + .../featurestore_service/transports/grpc.py | 1022 + .../transports/grpc_asyncio.py | 1021 + .../index_endpoint_service/__init__.py | 22 + .../index_endpoint_service/async_client.py | 1793 + .../services/index_endpoint_service/client.py | 1996 + .../services/index_endpoint_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../index_endpoint_service/transports/base.py | 352 + .../index_endpoint_service/transports/grpc.py | 679 + .../transports/grpc_asyncio.py | 678 + .../services/index_service/__init__.py | 22 + .../services/index_service/async_client.py | 1432 + .../services/index_service/client.py | 1635 + .../services/index_service/pagers.py | 140 + .../index_service/transports/__init__.py | 33 + .../services/index_service/transports/base.py | 309 + .../services/index_service/transports/grpc.py | 598 + .../index_service/transports/grpc_asyncio.py | 597 + .../services/job_service/__init__.py | 22 + .../services/job_service/async_client.py | 3914 ++ .../services/job_service/client.py | 4198 +++ .../services/job_service/pagers.py | 750 + .../job_service/transports/__init__.py | 33 + .../services/job_service/transports/base.py | 641 + .../services/job_service/transports/grpc.py | 1262 + .../job_service/transports/grpc_asyncio.py | 
1261 + .../services/metadata_service/__init__.py | 22 + .../services/metadata_service/async_client.py | 4287 +++ .../services/metadata_service/client.py | 4517 +++ .../services/metadata_service/pagers.py | 628 + .../metadata_service/transports/__init__.py | 33 + .../metadata_service/transports/base.py | 682 + .../metadata_service/transports/grpc.py | 1301 + .../transports/grpc_asyncio.py | 1300 + .../services/migration_service/__init__.py | 22 + .../migration_service/async_client.py | 1109 + .../services/migration_service/client.py | 1357 + .../services/migration_service/pagers.py | 140 + .../migration_service/transports/__init__.py | 33 + .../migration_service/transports/base.py | 266 + .../migration_service/transports/grpc.py | 522 + .../transports/grpc_asyncio.py | 521 + .../services/model_service/__init__.py | 22 + .../services/model_service/async_client.py | 2457 ++ .../services/model_service/client.py | 2687 ++ .../services/model_service/pagers.py | 505 + .../model_service/transports/__init__.py | 33 + .../services/model_service/transports/base.py | 439 + .../services/model_service/transports/grpc.py | 849 + .../model_service/transports/grpc_asyncio.py | 848 + .../services/pipeline_service/__init__.py | 22 + .../services/pipeline_service/async_client.py | 1953 + .../services/pipeline_service/client.py | 2219 ++ .../services/pipeline_service/pagers.py | 262 + .../pipeline_service/transports/__init__.py | 33 + .../pipeline_service/transports/base.py | 383 + .../pipeline_service/transports/grpc.py | 758 + .../transports/grpc_asyncio.py | 757 + .../services/prediction_service/__init__.py | 22 + .../prediction_service/async_client.py | 1316 + .../services/prediction_service/client.py | 1519 + .../prediction_service/transports/__init__.py | 33 + .../prediction_service/transports/base.py | 274 + .../prediction_service/transports/grpc.py | 545 + .../transports/grpc_asyncio.py | 544 + .../specialist_pool_service/__init__.py | 22 + 
.../specialist_pool_service/async_client.py | 1452 + .../specialist_pool_service/client.py | 1646 + .../specialist_pool_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 309 + .../transports/grpc.py | 601 + .../transports/grpc_asyncio.py | 600 + .../services/tensorboard_service/__init__.py | 22 + .../tensorboard_service/async_client.py | 3998 ++ .../services/tensorboard_service/client.py | 4219 +++ .../services/tensorboard_service/pagers.py | 628 + .../transports/__init__.py | 33 + .../tensorboard_service/transports/base.py | 638 + .../tensorboard_service/transports/grpc.py | 1222 + .../transports/grpc_asyncio.py | 1221 + .../services/vizier_service/__init__.py | 22 + .../services/vizier_service/async_client.py | 2264 ++ .../services/vizier_service/client.py | 2481 ++ .../services/vizier_service/pagers.py | 261 + .../vizier_service/transports/__init__.py | 33 + .../vizier_service/transports/base.py | 451 + .../vizier_service/transports/grpc.py | 876 + .../vizier_service/transports/grpc_asyncio.py | 875 + .../cloud/aiplatform_v1/types/__init__.py | 1026 + .../aiplatform_v1/types/accelerator_type.py | 40 + .../cloud/aiplatform_v1/types/annotation.py | 129 + .../aiplatform_v1/types/annotation_spec.py | 78 + .../cloud/aiplatform_v1/types/artifact.py | 153 + .../types/batch_prediction_job.py | 505 + .../aiplatform_v1/types/completion_stats.py | 72 + .../cloud/aiplatform_v1/types/context.py | 136 + .../cloud/aiplatform_v1/types/custom_job.py | 476 + .../cloud/aiplatform_v1/types/data_item.py | 101 + .../aiplatform_v1/types/data_labeling_job.py | 350 + .../cloud/aiplatform_v1/types/dataset.py | 236 + .../aiplatform_v1/types/dataset_service.py | 543 + .../aiplatform_v1/types/deployed_index_ref.py | 49 + .../aiplatform_v1/types/deployed_model_ref.py | 48 + .../aiplatform_v1/types/encryption_spec.py | 47 + .../cloud/aiplatform_v1/types/endpoint.py | 422 + .../aiplatform_v1/types/endpoint_service.py | 409 + 
.../cloud/aiplatform_v1/types/entity_type.py | 118 + .../cloud/aiplatform_v1/types/env_var.py | 56 + .../google/cloud/aiplatform_v1/types/event.py | 93 + .../cloud/aiplatform_v1/types/execution.py | 149 + .../cloud/aiplatform_v1/types/explanation.py | 838 + .../types/explanation_metadata.py | 467 + .../cloud/aiplatform_v1/types/feature.py | 179 + .../types/feature_monitoring_stats.py | 124 + .../aiplatform_v1/types/feature_selector.py | 62 + .../cloud/aiplatform_v1/types/featurestore.py | 175 + .../types/featurestore_monitoring.py | 190 + .../types/featurestore_online_service.py | 381 + .../types/featurestore_service.py | 1665 + .../types/hyperparameter_tuning_job.py | 182 + .../google/cloud/aiplatform_v1/types/index.py | 142 + .../aiplatform_v1/types/index_endpoint.py | 384 + .../types/index_endpoint_service.py | 419 + .../aiplatform_v1/types/index_service.py | 362 + .../v1/google/cloud/aiplatform_v1/types/io.py | 198 + .../cloud/aiplatform_v1/types/job_service.py | 1093 + .../cloud/aiplatform_v1/types/job_state.py | 42 + .../aiplatform_v1/types/lineage_subgraph.py | 62 + .../aiplatform_v1/types/machine_resources.py | 347 + .../types/manual_batch_tuning_parameters.py | 50 + .../aiplatform_v1/types/metadata_schema.py | 96 + .../aiplatform_v1/types/metadata_service.py | 1477 + .../aiplatform_v1/types/metadata_store.py | 100 + .../types/migratable_resource.py | 228 + .../aiplatform_v1/types/migration_service.py | 479 + .../google/cloud/aiplatform_v1/types/model.py | 800 + .../types/model_deployment_monitoring_job.py | 490 + .../aiplatform_v1/types/model_evaluation.py | 180 + .../types/model_evaluation_slice.py | 110 + .../aiplatform_v1/types/model_monitoring.py | 420 + .../aiplatform_v1/types/model_service.py | 815 + .../cloud/aiplatform_v1/types/operation.py | 83 + .../types/pipeline_failure_policy.py | 41 + .../cloud/aiplatform_v1/types/pipeline_job.py | 574 + .../aiplatform_v1/types/pipeline_service.py | 416 + .../aiplatform_v1/types/pipeline_state.py | 40 + 
.../aiplatform_v1/types/prediction_service.py | 273 + .../aiplatform_v1/types/specialist_pool.py | 86 + .../types/specialist_pool_service.py | 237 + .../google/cloud/aiplatform_v1/types/study.py | 892 + .../cloud/aiplatform_v1/types/tensorboard.py | 131 + .../aiplatform_v1/types/tensorboard_data.py | 205 + .../types/tensorboard_experiment.py | 115 + .../aiplatform_v1/types/tensorboard_run.py | 112 + .../types/tensorboard_service.py | 1224 + .../types/tensorboard_time_series.py | 153 + .../aiplatform_v1/types/training_pipeline.py | 651 + .../google/cloud/aiplatform_v1/types/types.py | 86 + .../types/unmanaged_container_model.py | 62 + .../types/user_action_reference.py | 75 + .../google/cloud/aiplatform_v1/types/value.py | 69 + .../aiplatform_v1/types/vizier_service.py | 589 + owl-bot-staging/v1/mypy.ini | 3 + owl-bot-staging/v1/noxfile.py | 180 + ...ed_dataset_service_create_dataset_async.py | 55 + ...ted_dataset_service_create_dataset_sync.py | 55 + ...ed_dataset_service_delete_dataset_async.py | 49 + ...ted_dataset_service_delete_dataset_sync.py | 49 + ...rated_dataset_service_export_data_async.py | 53 + ...erated_dataset_service_export_data_sync.py | 53 + ...taset_service_get_annotation_spec_async.py | 45 + ...ataset_service_get_annotation_spec_sync.py | 45 + ...rated_dataset_service_get_dataset_async.py | 45 + ...erated_dataset_service_get_dataset_sync.py | 45 + ...rated_dataset_service_import_data_async.py | 54 + ...erated_dataset_service_import_data_sync.py | 54 + ..._dataset_service_list_annotations_async.py | 46 + ...d_dataset_service_list_annotations_sync.py | 46 + ...d_dataset_service_list_data_items_async.py | 46 + ...ed_dataset_service_list_data_items_sync.py | 46 + ...ted_dataset_service_list_datasets_async.py | 46 + ...ated_dataset_service_list_datasets_sync.py | 46 + ...ed_dataset_service_update_dataset_async.py | 50 + ...ted_dataset_service_update_dataset_sync.py | 50 + ..._endpoint_service_create_endpoint_async.py | 53 + 
...d_endpoint_service_create_endpoint_sync.py | 53 + ..._endpoint_service_delete_endpoint_async.py | 49 + ...d_endpoint_service_delete_endpoint_sync.py | 49 + ...ted_endpoint_service_deploy_model_async.py | 54 + ...ated_endpoint_service_deploy_model_sync.py | 54 + ...ted_endpoint_service_get_endpoint_async.py | 45 + ...ated_endpoint_service_get_endpoint_sync.py | 45 + ...d_endpoint_service_list_endpoints_async.py | 46 + ...ed_endpoint_service_list_endpoints_sync.py | 46 + ...d_endpoint_service_undeploy_model_async.py | 50 + ...ed_endpoint_service_undeploy_model_sync.py | 50 + ..._endpoint_service_update_endpoint_async.py | 48 + ...d_endpoint_service_update_endpoint_sync.py | 48 + ...rving_service_read_feature_values_async.py | 50 + ...erving_service_read_feature_values_sync.py | 50 + ...ice_streaming_read_feature_values_async.py | 51 + ...vice_streaming_read_feature_values_sync.py | 51 + ...ore_service_batch_create_features_async.py | 55 + ...tore_service_batch_create_features_sync.py | 55 + ...service_batch_read_feature_values_async.py | 62 + ..._service_batch_read_feature_values_sync.py | 62 + ...estore_service_create_entity_type_async.py | 50 + ...restore_service_create_entity_type_sync.py | 50 + ...aturestore_service_create_feature_async.py | 54 + ...eaturestore_service_create_feature_sync.py | 54 + ...store_service_create_featurestore_async.py | 50 + ...estore_service_create_featurestore_sync.py | 50 + ...estore_service_delete_entity_type_async.py | 49 + ...restore_service_delete_entity_type_sync.py | 49 + ...aturestore_service_delete_feature_async.py | 49 + ...eaturestore_service_delete_feature_sync.py | 49 + ...store_service_delete_featurestore_async.py | 49 + ...estore_service_delete_featurestore_sync.py | 49 + ...ore_service_export_feature_values_async.py | 57 + ...tore_service_export_feature_values_sync.py | 57 + ...turestore_service_get_entity_type_async.py | 45 + ...aturestore_service_get_entity_type_sync.py | 45 + 
..._featurestore_service_get_feature_async.py | 45 + ...d_featurestore_service_get_feature_sync.py | 45 + ...urestore_service_get_featurestore_async.py | 45 + ...turestore_service_get_featurestore_sync.py | 45 + ...ore_service_import_feature_values_async.py | 58 + ...tore_service_import_feature_values_sync.py | 58 + ...restore_service_list_entity_types_async.py | 46 + ...urestore_service_list_entity_types_sync.py | 46 + ...eaturestore_service_list_features_async.py | 46 + ...featurestore_service_list_features_sync.py | 46 + ...estore_service_list_featurestores_async.py | 46 + ...restore_service_list_featurestores_sync.py | 46 + ...turestore_service_search_features_async.py | 46 + ...aturestore_service_search_features_sync.py | 46 + ...estore_service_update_entity_type_async.py | 44 + ...restore_service_update_entity_type_sync.py | 44 + ...aturestore_service_update_feature_async.py | 48 + ...eaturestore_service_update_feature_sync.py | 48 + ...store_service_update_featurestore_async.py | 48 + ...estore_service_update_featurestore_sync.py | 48 + ...int_service_create_index_endpoint_async.py | 53 + ...oint_service_create_index_endpoint_sync.py | 53 + ...int_service_delete_index_endpoint_async.py | 49 + ...oint_service_delete_index_endpoint_sync.py | 49 + ...dex_endpoint_service_deploy_index_async.py | 54 + ...ndex_endpoint_service_deploy_index_sync.py | 54 + ...dpoint_service_get_index_endpoint_async.py | 45 + ...ndpoint_service_get_index_endpoint_sync.py | 45 + ...oint_service_list_index_endpoints_async.py | 46 + ...point_service_list_index_endpoints_sync.py | 46 + ...int_service_mutate_deployed_index_async.py | 54 + ...oint_service_mutate_deployed_index_sync.py | 54 + ...x_endpoint_service_undeploy_index_async.py | 50 + ...ex_endpoint_service_undeploy_index_sync.py | 50 + ...int_service_update_index_endpoint_async.py | 48 + ...oint_service_update_index_endpoint_sync.py | 48 + ...erated_index_service_create_index_async.py | 53 + 
...nerated_index_service_create_index_sync.py | 53 + ...erated_index_service_delete_index_async.py | 49 + ...nerated_index_service_delete_index_sync.py | 49 + ...generated_index_service_get_index_async.py | 45 + ..._generated_index_service_get_index_sync.py | 45 + ...erated_index_service_list_indexes_async.py | 46 + ...nerated_index_service_list_indexes_sync.py | 46 + ...erated_index_service_update_index_async.py | 52 + ...nerated_index_service_update_index_sync.py | 52 + ...rvice_cancel_batch_prediction_job_async.py | 43 + ...ervice_cancel_batch_prediction_job_sync.py | 43 + ...ted_job_service_cancel_custom_job_async.py | 43 + ...ated_job_service_cancel_custom_job_sync.py | 43 + ..._service_cancel_data_labeling_job_async.py | 43 + ...b_service_cancel_data_labeling_job_sync.py | 43 + ..._cancel_hyperparameter_tuning_job_async.py | 43 + ...e_cancel_hyperparameter_tuning_job_sync.py | 43 + ...rvice_create_batch_prediction_job_async.py | 53 + ...ervice_create_batch_prediction_job_sync.py | 53 + ...ted_job_service_create_custom_job_async.py | 50 + ...ated_job_service_create_custom_job_sync.py | 50 + ..._service_create_data_labeling_job_async.py | 54 + ...b_service_create_data_labeling_job_sync.py | 54 + ..._create_hyperparameter_tuning_job_async.py | 57 + ...e_create_hyperparameter_tuning_job_sync.py | 57 + ...e_model_deployment_monitoring_job_async.py | 50 + ...te_model_deployment_monitoring_job_sync.py | 50 + ...rvice_delete_batch_prediction_job_async.py | 49 + ...ervice_delete_batch_prediction_job_sync.py | 49 + ...ted_job_service_delete_custom_job_async.py | 49 + ...ated_job_service_delete_custom_job_sync.py | 49 + ..._service_delete_data_labeling_job_async.py | 49 + ...b_service_delete_data_labeling_job_sync.py | 49 + ..._delete_hyperparameter_tuning_job_async.py | 49 + ...e_delete_hyperparameter_tuning_job_sync.py | 49 + ...e_model_deployment_monitoring_job_async.py | 49 + ...te_model_deployment_monitoring_job_sync.py | 49 + 
..._service_get_batch_prediction_job_async.py | 45 + ...b_service_get_batch_prediction_job_sync.py | 45 + ...erated_job_service_get_custom_job_async.py | 45 + ...nerated_job_service_get_custom_job_sync.py | 45 + ...job_service_get_data_labeling_job_async.py | 45 + ..._job_service_get_data_labeling_job_sync.py | 45 + ...ice_get_hyperparameter_tuning_job_async.py | 45 + ...vice_get_hyperparameter_tuning_job_sync.py | 45 + ...t_model_deployment_monitoring_job_async.py | 45 + ...et_model_deployment_monitoring_job_sync.py | 45 + ...ervice_list_batch_prediction_jobs_async.py | 46 + ...service_list_batch_prediction_jobs_sync.py | 46 + ...ated_job_service_list_custom_jobs_async.py | 46 + ...rated_job_service_list_custom_jobs_sync.py | 46 + ...b_service_list_data_labeling_jobs_async.py | 46 + ...ob_service_list_data_labeling_jobs_sync.py | 46 + ...e_list_hyperparameter_tuning_jobs_async.py | 46 + ...ce_list_hyperparameter_tuning_jobs_sync.py | 46 + ..._model_deployment_monitoring_jobs_async.py | 46 + ...t_model_deployment_monitoring_jobs_sync.py | 46 + ...e_model_deployment_monitoring_job_async.py | 43 + ...se_model_deployment_monitoring_job_sync.py | 43 + ...e_model_deployment_monitoring_job_async.py | 43 + ...me_model_deployment_monitoring_job_sync.py | 43 + ...oyment_monitoring_stats_anomalies_async.py | 47 + ...loyment_monitoring_stats_anomalies_sync.py | 47 + ...e_model_deployment_monitoring_job_async.py | 53 + ...te_model_deployment_monitoring_job_sync.py | 53 + ..._context_artifacts_and_executions_async.py | 45 + ...d_context_artifacts_and_executions_sync.py | 45 + ...data_service_add_context_children_async.py | 45 + ...adata_service_add_context_children_sync.py | 45 + ...data_service_add_execution_events_async.py | 45 + ...adata_service_add_execution_events_sync.py | 45 + ..._metadata_service_create_artifact_async.py | 45 + ...d_metadata_service_create_artifact_sync.py | 45 + ...d_metadata_service_create_context_async.py | 45 + 
...ed_metadata_service_create_context_sync.py | 45 + ...metadata_service_create_execution_async.py | 45 + ..._metadata_service_create_execution_sync.py | 45 + ...ta_service_create_metadata_schema_async.py | 49 + ...ata_service_create_metadata_schema_sync.py | 49 + ...ata_service_create_metadata_store_async.py | 49 + ...data_service_create_metadata_store_sync.py | 49 + ..._metadata_service_delete_artifact_async.py | 49 + ...d_metadata_service_delete_artifact_sync.py | 49 + ...d_metadata_service_delete_context_async.py | 49 + ...ed_metadata_service_delete_context_sync.py | 49 + ...metadata_service_delete_execution_async.py | 49 + ..._metadata_service_delete_execution_sync.py | 49 + ...ata_service_delete_metadata_store_async.py | 49 + ...data_service_delete_metadata_store_sync.py | 49 + ...ted_metadata_service_get_artifact_async.py | 45 + ...ated_metadata_service_get_artifact_sync.py | 45 + ...ated_metadata_service_get_context_async.py | 45 + ...rated_metadata_service_get_context_sync.py | 45 + ...ed_metadata_service_get_execution_async.py | 45 + ...ted_metadata_service_get_execution_sync.py | 45 + ...adata_service_get_metadata_schema_async.py | 45 + ...tadata_service_get_metadata_schema_sync.py | 45 + ...tadata_service_get_metadata_store_async.py | 45 + ...etadata_service_get_metadata_store_sync.py | 45 + ...d_metadata_service_list_artifacts_async.py | 46 + ...ed_metadata_service_list_artifacts_sync.py | 46 + ...ed_metadata_service_list_contexts_async.py | 46 + ...ted_metadata_service_list_contexts_sync.py | 46 + ..._metadata_service_list_executions_async.py | 46 + ...d_metadata_service_list_executions_sync.py | 46 + ...ata_service_list_metadata_schemas_async.py | 46 + ...data_service_list_metadata_schemas_sync.py | 46 + ...data_service_list_metadata_stores_async.py | 46 + ...adata_service_list_metadata_stores_sync.py | 46 + ..._metadata_service_purge_artifacts_async.py | 50 + ...d_metadata_service_purge_artifacts_sync.py | 50 + 
...d_metadata_service_purge_contexts_async.py | 50 + ...ed_metadata_service_purge_contexts_sync.py | 50 + ...metadata_service_purge_executions_async.py | 50 + ..._metadata_service_purge_executions_sync.py | 50 + ...e_query_artifact_lineage_subgraph_async.py | 45 + ...ce_query_artifact_lineage_subgraph_sync.py | 45 + ...ce_query_context_lineage_subgraph_async.py | 45 + ...ice_query_context_lineage_subgraph_sync.py | 45 + ...uery_execution_inputs_and_outputs_async.py | 45 + ...query_execution_inputs_and_outputs_sync.py | 45 + ..._metadata_service_update_artifact_async.py | 44 + ...d_metadata_service_update_artifact_sync.py | 44 + ...d_metadata_service_update_context_async.py | 44 + ...ed_metadata_service_update_context_sync.py | 44 + ...metadata_service_update_execution_async.py | 44 + ..._metadata_service_update_execution_sync.py | 44 + ...n_service_batch_migrate_resources_async.py | 55 + ...on_service_batch_migrate_resources_sync.py | 55 + ...rvice_search_migratable_resources_async.py | 46 + ...ervice_search_migratable_resources_sync.py | 46 + ...erated_model_service_delete_model_async.py | 49 + ...nerated_model_service_delete_model_sync.py | 49 + ...odel_service_delete_model_version_async.py | 49 + ...model_service_delete_model_version_sync.py | 49 + ...erated_model_service_export_model_async.py | 49 + ...nerated_model_service_export_model_sync.py | 49 + ...generated_model_service_get_model_async.py | 45 + ...odel_service_get_model_evaluation_async.py | 45 + ...ervice_get_model_evaluation_slice_async.py | 45 + ...service_get_model_evaluation_slice_sync.py | 45 + ...model_service_get_model_evaluation_sync.py | 45 + ..._generated_model_service_get_model_sync.py | 45 + ...l_service_import_model_evaluation_async.py | 45 + ...el_service_import_model_evaluation_sync.py | 45 + ...vice_list_model_evaluation_slices_async.py | 46 + ...rvice_list_model_evaluation_slices_sync.py | 46 + ...el_service_list_model_evaluations_async.py | 46 + 
...del_service_list_model_evaluations_sync.py | 46 + ...model_service_list_model_versions_async.py | 46 + ..._model_service_list_model_versions_sync.py | 46 + ...nerated_model_service_list_models_async.py | 46 + ...enerated_model_service_list_models_sync.py | 46 + ...del_service_merge_version_aliases_async.py | 46 + ...odel_service_merge_version_aliases_sync.py | 46 + ...erated_model_service_update_model_async.py | 48 + ...nerated_model_service_update_model_sync.py | 48 + ...erated_model_service_upload_model_async.py | 53 + ...nerated_model_service_upload_model_sync.py | 53 + ...eline_service_cancel_pipeline_job_async.py | 43 + ...peline_service_cancel_pipeline_job_sync.py | 43 + ..._service_cancel_training_pipeline_async.py | 43 + ...e_service_cancel_training_pipeline_sync.py | 43 + ...eline_service_create_pipeline_job_async.py | 45 + ...peline_service_create_pipeline_job_sync.py | 45 + ..._service_create_training_pipeline_async.py | 51 + ...e_service_create_training_pipeline_sync.py | 51 + ...eline_service_delete_pipeline_job_async.py | 49 + ...peline_service_delete_pipeline_job_sync.py | 49 + ..._service_delete_training_pipeline_async.py | 49 + ...e_service_delete_training_pipeline_sync.py | 49 + ...pipeline_service_get_pipeline_job_async.py | 45 + ..._pipeline_service_get_pipeline_job_sync.py | 45 + ...ine_service_get_training_pipeline_async.py | 45 + ...line_service_get_training_pipeline_sync.py | 45 + ...peline_service_list_pipeline_jobs_async.py | 46 + ...ipeline_service_list_pipeline_jobs_sync.py | 46 + ...e_service_list_training_pipelines_async.py | 46 + ...ne_service_list_training_pipelines_sync.py | 46 + ...erated_prediction_service_explain_async.py | 49 + ...nerated_prediction_service_explain_sync.py | 49 + ...erated_prediction_service_predict_async.py | 49 + ...nerated_prediction_service_predict_sync.py | 49 + ...ed_prediction_service_raw_predict_async.py | 45 + ...ted_prediction_service_raw_predict_sync.py | 45 + 
...ol_service_create_specialist_pool_async.py | 54 + ...ool_service_create_specialist_pool_sync.py | 54 + ...ol_service_delete_specialist_pool_async.py | 49 + ...ool_service_delete_specialist_pool_sync.py | 49 + ..._pool_service_get_specialist_pool_async.py | 45 + ...t_pool_service_get_specialist_pool_sync.py | 45 + ...ool_service_list_specialist_pools_async.py | 46 + ...pool_service_list_specialist_pools_sync.py | 46 + ...ol_service_update_specialist_pool_async.py | 53 + ...ool_service_update_specialist_pool_sync.py | 53 + ...ice_batch_create_tensorboard_runs_async.py | 51 + ...vice_batch_create_tensorboard_runs_sync.py | 51 + ...ch_create_tensorboard_time_series_async.py | 51 + ...tch_create_tensorboard_time_series_sync.py | 51 + ...read_tensorboard_time_series_data_async.py | 46 + ..._read_tensorboard_time_series_data_sync.py | 46 + ...rboard_service_create_tensorboard_async.py | 53 + ...ice_create_tensorboard_experiment_async.py | 46 + ...vice_create_tensorboard_experiment_sync.py | 46 + ...rd_service_create_tensorboard_run_async.py | 50 + ...ard_service_create_tensorboard_run_sync.py | 50 + ...orboard_service_create_tensorboard_sync.py | 53 + ...ce_create_tensorboard_time_series_async.py | 50 + ...ice_create_tensorboard_time_series_sync.py | 50 + ...rboard_service_delete_tensorboard_async.py | 49 + ...ice_delete_tensorboard_experiment_async.py | 49 + ...vice_delete_tensorboard_experiment_sync.py | 49 + ...rd_service_delete_tensorboard_run_async.py | 49 + ...ard_service_delete_tensorboard_run_sync.py | 49 + ...orboard_service_delete_tensorboard_sync.py | 49 + ...ce_delete_tensorboard_time_series_async.py | 49 + ...ice_delete_tensorboard_time_series_sync.py | 49 + ...port_tensorboard_time_series_data_async.py | 46 + ...xport_tensorboard_time_series_data_sync.py | 46 + ...nsorboard_service_get_tensorboard_async.py | 45 + ...ervice_get_tensorboard_experiment_async.py | 45 + ...service_get_tensorboard_experiment_sync.py | 45 + 
...board_service_get_tensorboard_run_async.py | 45 + ...rboard_service_get_tensorboard_run_sync.py | 45 + ...ensorboard_service_get_tensorboard_sync.py | 45 + ...rvice_get_tensorboard_time_series_async.py | 45 + ...ervice_get_tensorboard_time_series_sync.py | 45 + ...vice_list_tensorboard_experiments_async.py | 46 + ...rvice_list_tensorboard_experiments_sync.py | 46 + ...ard_service_list_tensorboard_runs_async.py | 46 + ...oard_service_list_tensorboard_runs_sync.py | 46 + ...vice_list_tensorboard_time_series_async.py | 46 + ...rvice_list_tensorboard_time_series_sync.py | 46 + ...orboard_service_list_tensorboards_async.py | 46 + ...sorboard_service_list_tensorboards_sync.py | 46 + ...ervice_read_tensorboard_blob_data_async.py | 46 + ...service_read_tensorboard_blob_data_sync.py | 46 + ...read_tensorboard_time_series_data_async.py | 45 + ..._read_tensorboard_time_series_data_sync.py | 45 + ...rboard_service_update_tensorboard_async.py | 52 + ...ice_update_tensorboard_experiment_async.py | 44 + ...vice_update_tensorboard_experiment_sync.py | 44 + ...rd_service_update_tensorboard_run_async.py | 48 + ...ard_service_update_tensorboard_run_sync.py | 48 + ...orboard_service_update_tensorboard_sync.py | 52 + ...ce_update_tensorboard_time_series_async.py | 49 + ...ice_update_tensorboard_time_series_sync.py | 49 + ...write_tensorboard_experiment_data_async.py | 51 + ..._write_tensorboard_experiment_data_sync.py | 51 + ...ervice_write_tensorboard_run_data_async.py | 50 + ...service_write_tensorboard_run_data_sync.py | 50 + ...ier_service_add_trial_measurement_async.py | 45 + ...zier_service_add_trial_measurement_sync.py | 45 + ..._check_trial_early_stopping_state_async.py | 49 + ...e_check_trial_early_stopping_state_sync.py | 49 + ...ted_vizier_service_complete_trial_async.py | 45 + ...ated_vizier_service_complete_trial_sync.py | 45 + ...rated_vizier_service_create_study_async.py | 54 + ...erated_vizier_service_create_study_sync.py | 54 + 
...rated_vizier_service_create_trial_async.py | 45 + ...erated_vizier_service_create_trial_sync.py | 45 + ...rated_vizier_service_delete_study_async.py | 43 + ...erated_vizier_service_delete_study_sync.py | 43 + ...rated_vizier_service_delete_trial_async.py | 43 + ...erated_vizier_service_delete_trial_sync.py | 43 + ...enerated_vizier_service_get_study_async.py | 45 + ...generated_vizier_service_get_study_sync.py | 45 + ...enerated_vizier_service_get_trial_async.py | 45 + ...generated_vizier_service_get_trial_sync.py | 45 + ...izier_service_list_optimal_trials_async.py | 45 + ...vizier_service_list_optimal_trials_sync.py | 45 + ...rated_vizier_service_list_studies_async.py | 46 + ...erated_vizier_service_list_studies_sync.py | 46 + ...erated_vizier_service_list_trials_async.py | 46 + ...nerated_vizier_service_list_trials_sync.py | 46 + ...rated_vizier_service_lookup_study_async.py | 46 + ...erated_vizier_service_lookup_study_sync.py | 46 + ...nerated_vizier_service_stop_trial_async.py | 45 + ...enerated_vizier_service_stop_trial_sync.py | 45 + ...ted_vizier_service_suggest_trials_async.py | 51 + ...ated_vizier_service_suggest_trials_sync.py | 51 + .../snippet_metadata_aiplatform_v1.json | 30886 +++++++++++++++ .../scripts/fixup_aiplatform_v1_keywords.py | 363 + .../scripts/fixup_definition_v1_keywords.py | 175 + .../v1/scripts/fixup_instance_v1_keywords.py | 175 + .../v1/scripts/fixup_params_v1_keywords.py | 175 + .../scripts/fixup_prediction_v1_keywords.py | 175 + owl-bot-staging/v1/setup.py | 59 + owl-bot-staging/v1/tests/__init__.py | 16 + owl-bot-staging/v1/tests/unit/__init__.py | 16 + .../v1/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/aiplatform_v1/__init__.py | 16 + .../aiplatform_v1/test_dataset_service.py | 5583 +++ .../aiplatform_v1/test_endpoint_service.py | 4536 +++ ...est_featurestore_online_serving_service.py | 2953 ++ .../test_featurestore_service.py | 8173 ++++ .../test_index_endpoint_service.py | 4694 +++ 
.../gapic/aiplatform_v1/test_index_service.py | 3945 ++ .../gapic/aiplatform_v1/test_job_service.py | 10658 ++++++ .../aiplatform_v1/test_metadata_service.py | 11200 ++++++ .../aiplatform_v1/test_migration_service.py | 3321 ++ .../gapic/aiplatform_v1/test_model_service.py | 6884 ++++ .../aiplatform_v1/test_pipeline_service.py | 5529 +++ .../aiplatform_v1/test_prediction_service.py | 3302 ++ .../test_specialist_pool_service.py | 3925 ++ .../aiplatform_v1/test_tensorboard_service.py | 10369 ++++++ .../aiplatform_v1/test_vizier_service.py | 6176 +++ .../unit/gapic/definition_v1/__init__.py | 16 + .../tests/unit/gapic/instance_v1/__init__.py | 16 + .../v1/tests/unit/gapic/params_v1/__init__.py | 16 + .../unit/gapic/prediction_v1/__init__.py | 16 + owl-bot-staging/v1beta1/.coveragerc | 17 + owl-bot-staging/v1beta1/.flake8 | 33 + owl-bot-staging/v1beta1/MANIFEST.in | 2 + owl-bot-staging/v1beta1/README.rst | 49 + .../aiplatform_v1beta1/dataset_service.rst | 10 + .../aiplatform_v1beta1/endpoint_service.rst | 10 + .../featurestore_online_serving_service.rst | 6 + .../featurestore_service.rst | 10 + .../index_endpoint_service.rst | 10 + .../docs/aiplatform_v1beta1/index_service.rst | 10 + .../docs/aiplatform_v1beta1/job_service.rst | 10 + .../aiplatform_v1beta1/metadata_service.rst | 10 + .../aiplatform_v1beta1/migration_service.rst | 10 + .../docs/aiplatform_v1beta1/model_service.rst | 10 + .../aiplatform_v1beta1/pipeline_service.rst | 10 + .../aiplatform_v1beta1/prediction_service.rst | 6 + .../docs/aiplatform_v1beta1/services.rst | 20 + .../specialist_pool_service.rst | 10 + .../tensorboard_service.rst | 10 + .../v1beta1/docs/aiplatform_v1beta1/types.rst | 7 + .../aiplatform_v1beta1/vizier_service.rst | 10 + owl-bot-staging/v1beta1/docs/conf.py | 376 + .../docs/definition_v1beta1/services.rst | 4 + .../v1beta1/docs/definition_v1beta1/types.rst | 7 + owl-bot-staging/v1beta1/docs/index.rst | 7 + .../docs/instance_v1beta1/services.rst | 4 + 
.../v1beta1/docs/instance_v1beta1/types.rst | 7 + .../v1beta1/docs/params_v1beta1/services.rst | 4 + .../v1beta1/docs/params_v1beta1/types.rst | 7 + .../docs/prediction_v1beta1/services.rst | 4 + .../v1beta1/docs/prediction_v1beta1/types.rst | 7 + .../google/cloud/aiplatform/__init__.py | 963 + .../v1beta1/google/cloud/aiplatform/py.typed | 2 + .../schema/predict/instance/__init__.py | 37 + .../v1beta1/schema/predict/instance/py.typed | 2 + .../predict/instance_v1beta1/__init__.py | 38 + .../instance_v1beta1/gapic_metadata.json | 7 + .../schema/predict/instance_v1beta1/py.typed | 2 + .../instance_v1beta1/services/__init__.py | 15 + .../instance_v1beta1/types/__init__.py | 54 + .../types/image_classification.py | 56 + .../types/image_object_detection.py | 56 + .../types/image_segmentation.py | 50 + .../types/text_classification.py | 49 + .../instance_v1beta1/types/text_extraction.py | 62 + .../instance_v1beta1/types/text_sentiment.py | 49 + .../types/video_action_recognition.py | 73 + .../types/video_classification.py | 73 + .../types/video_object_tracking.py | 73 + .../v1beta1/schema/predict/params/__init__.py | 31 + .../v1beta1/schema/predict/params/py.typed | 2 + .../schema/predict/params_v1beta1/__init__.py | 32 + .../params_v1beta1/gapic_metadata.json | 7 + .../schema/predict/params_v1beta1/py.typed | 2 + .../params_v1beta1/services/__init__.py | 15 + .../predict/params_v1beta1/types/__init__.py | 42 + .../types/image_classification.py | 52 + .../types/image_object_detection.py | 53 + .../types/image_segmentation.py | 45 + .../types/video_action_recognition.py | 53 + .../types/video_classification.py | 95 + .../types/video_object_tracking.py | 61 + .../schema/predict/prediction/__init__.py | 41 + .../schema/predict/prediction/py.typed | 2 + .../predict/prediction_v1beta1/__init__.py | 42 + .../prediction_v1beta1/gapic_metadata.json | 7 + .../predict/prediction_v1beta1/py.typed | 2 + .../prediction_v1beta1/services/__init__.py | 15 + 
.../prediction_v1beta1/types/__init__.py | 62 + .../types/classification.py | 57 + .../types/image_object_detection.py | 73 + .../types/image_segmentation.py | 62 + .../types/tabular_classification.py | 52 + .../types/tabular_regression.py | 53 + .../types/text_extraction.py | 78 + .../types/text_sentiment.py | 48 + .../types/time_series_forecasting.py | 41 + .../types/video_action_recognition.py | 85 + .../types/video_classification.py | 103 + .../types/video_object_tracking.py | 145 + .../schema/trainingjob/definition/__init__.py | 75 + .../schema/trainingjob/definition/py.typed | 2 + .../definition_v1beta1/__init__.py | 76 + .../definition_v1beta1/gapic_metadata.json | 7 + .../trainingjob/definition_v1beta1/py.typed | 2 + .../definition_v1beta1/services/__init__.py | 15 + .../definition_v1beta1/types/__init__.py | 98 + .../types/automl_image_classification.py | 158 + .../types/automl_image_object_detection.py | 139 + .../types/automl_image_segmentation.py | 133 + .../definition_v1beta1/types/automl_tables.py | 529 + .../types/automl_text_classification.py | 58 + .../types/automl_text_extraction.py | 49 + .../types/automl_text_sentiment.py | 67 + .../types/automl_time_series_forecasting.py | 493 + .../types/automl_video_action_recognition.py | 66 + .../types/automl_video_classification.py | 65 + .../types/automl_video_object_tracking.py | 68 + .../export_evaluated_data_items_config.py | 57 + .../cloud/aiplatform_v1beta1/__init__.py | 964 + .../aiplatform_v1beta1/gapic_metadata.json | 2109 ++ .../google/cloud/aiplatform_v1beta1/py.typed | 2 + .../aiplatform_v1beta1/services/__init__.py | 15 + .../services/dataset_service/__init__.py | 22 + .../services/dataset_service/async_client.py | 1989 + .../services/dataset_service/client.py | 2210 ++ .../services/dataset_service/pagers.py | 384 + .../dataset_service/transports/__init__.py | 33 + .../dataset_service/transports/base.py | 381 + .../dataset_service/transports/grpc.py | 728 + .../transports/grpc_asyncio.py | 727 
+ .../services/endpoint_service/__init__.py | 22 + .../services/endpoint_service/async_client.py | 1729 + .../services/endpoint_service/client.py | 1948 + .../services/endpoint_service/pagers.py | 140 + .../endpoint_service/transports/__init__.py | 33 + .../endpoint_service/transports/base.py | 338 + .../endpoint_service/transports/grpc.py | 651 + .../transports/grpc_asyncio.py | 650 + .../__init__.py | 22 + .../async_client.py | 1058 + .../client.py | 1252 + .../transports/__init__.py | 33 + .../transports/base.py | 259 + .../transports/grpc.py | 502 + .../transports/grpc_asyncio.py | 501 + .../services/featurestore_service/__init__.py | 22 + .../featurestore_service/async_client.py | 3378 ++ .../services/featurestore_service/client.py | 3590 ++ .../services/featurestore_service/pagers.py | 505 + .../transports/__init__.py | 33 + .../featurestore_service/transports/base.py | 523 + .../featurestore_service/transports/grpc.py | 1022 + .../transports/grpc_asyncio.py | 1021 + .../index_endpoint_service/__init__.py | 22 + .../index_endpoint_service/async_client.py | 1793 + .../services/index_endpoint_service/client.py | 1996 + .../services/index_endpoint_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../index_endpoint_service/transports/base.py | 352 + .../index_endpoint_service/transports/grpc.py | 679 + .../transports/grpc_asyncio.py | 678 + .../services/index_service/__init__.py | 22 + .../services/index_service/async_client.py | 1432 + .../services/index_service/client.py | 1635 + .../services/index_service/pagers.py | 140 + .../index_service/transports/__init__.py | 33 + .../services/index_service/transports/base.py | 309 + .../services/index_service/transports/grpc.py | 598 + .../index_service/transports/grpc_asyncio.py | 597 + .../services/job_service/__init__.py | 22 + .../services/job_service/async_client.py | 3914 ++ .../services/job_service/client.py | 4198 +++ .../services/job_service/pagers.py | 750 + .../job_service/transports/__init__.py | 
33 + .../services/job_service/transports/base.py | 641 + .../services/job_service/transports/grpc.py | 1262 + .../job_service/transports/grpc_asyncio.py | 1261 + .../services/metadata_service/__init__.py | 22 + .../services/metadata_service/async_client.py | 4287 +++ .../services/metadata_service/client.py | 4517 +++ .../services/metadata_service/pagers.py | 628 + .../metadata_service/transports/__init__.py | 33 + .../metadata_service/transports/base.py | 682 + .../metadata_service/transports/grpc.py | 1301 + .../transports/grpc_asyncio.py | 1300 + .../services/migration_service/__init__.py | 22 + .../migration_service/async_client.py | 1109 + .../services/migration_service/client.py | 1357 + .../services/migration_service/pagers.py | 140 + .../migration_service/transports/__init__.py | 33 + .../migration_service/transports/base.py | 266 + .../migration_service/transports/grpc.py | 522 + .../transports/grpc_asyncio.py | 521 + .../services/model_service/__init__.py | 22 + .../services/model_service/async_client.py | 2571 ++ .../services/model_service/client.py | 2801 ++ .../services/model_service/pagers.py | 505 + .../model_service/transports/__init__.py | 33 + .../services/model_service/transports/base.py | 453 + .../services/model_service/transports/grpc.py | 877 + .../model_service/transports/grpc_asyncio.py | 876 + .../services/pipeline_service/__init__.py | 22 + .../services/pipeline_service/async_client.py | 1953 + .../services/pipeline_service/client.py | 2219 ++ .../services/pipeline_service/pagers.py | 262 + .../pipeline_service/transports/__init__.py | 33 + .../pipeline_service/transports/base.py | 383 + .../pipeline_service/transports/grpc.py | 758 + .../transports/grpc_asyncio.py | 757 + .../services/prediction_service/__init__.py | 22 + .../prediction_service/async_client.py | 1316 + .../services/prediction_service/client.py | 1519 + .../prediction_service/transports/__init__.py | 33 + .../prediction_service/transports/base.py | 274 + 
.../prediction_service/transports/grpc.py | 545 + .../transports/grpc_asyncio.py | 544 + .../specialist_pool_service/__init__.py | 22 + .../specialist_pool_service/async_client.py | 1452 + .../specialist_pool_service/client.py | 1646 + .../specialist_pool_service/pagers.py | 140 + .../transports/__init__.py | 33 + .../transports/base.py | 309 + .../transports/grpc.py | 601 + .../transports/grpc_asyncio.py | 600 + .../services/tensorboard_service/__init__.py | 22 + .../tensorboard_service/async_client.py | 3998 ++ .../services/tensorboard_service/client.py | 4219 +++ .../services/tensorboard_service/pagers.py | 628 + .../transports/__init__.py | 33 + .../tensorboard_service/transports/base.py | 637 + .../tensorboard_service/transports/grpc.py | 1222 + .../transports/grpc_asyncio.py | 1221 + .../services/vizier_service/__init__.py | 22 + .../services/vizier_service/async_client.py | 2264 ++ .../services/vizier_service/client.py | 2481 ++ .../services/vizier_service/pagers.py | 261 + .../vizier_service/transports/__init__.py | 33 + .../vizier_service/transports/base.py | 451 + .../vizier_service/transports/grpc.py | 876 + .../vizier_service/transports/grpc_asyncio.py | 875 + .../aiplatform_v1beta1/types/__init__.py | 1036 + .../types/accelerator_type.py | 40 + .../aiplatform_v1beta1/types/annotation.py | 129 + .../types/annotation_spec.py | 78 + .../aiplatform_v1beta1/types/artifact.py | 153 + .../types/batch_prediction_job.py | 527 + .../types/completion_stats.py | 72 + .../cloud/aiplatform_v1beta1/types/context.py | 136 + .../aiplatform_v1beta1/types/custom_job.py | 476 + .../aiplatform_v1beta1/types/data_item.py | 101 + .../types/data_labeling_job.py | 350 + .../cloud/aiplatform_v1beta1/types/dataset.py | 236 + .../types/dataset_service.py | 543 + .../types/deployed_index_ref.py | 49 + .../types/deployed_model_ref.py | 48 + .../types/encryption_spec.py | 47 + .../aiplatform_v1beta1/types/endpoint.py | 428 + .../types/endpoint_service.py | 395 + 
.../aiplatform_v1beta1/types/entity_type.py | 118 + .../cloud/aiplatform_v1beta1/types/env_var.py | 56 + .../cloud/aiplatform_v1beta1/types/event.py | 93 + .../aiplatform_v1beta1/types/execution.py | 149 + .../aiplatform_v1beta1/types/explanation.py | 947 + .../types/explanation_metadata.py | 467 + .../cloud/aiplatform_v1beta1/types/feature.py | 211 + .../types/feature_monitoring_stats.py | 124 + .../types/feature_selector.py | 62 + .../aiplatform_v1beta1/types/featurestore.py | 175 + .../types/featurestore_monitoring.py | 201 + .../types/featurestore_online_service.py | 381 + .../types/featurestore_service.py | 1665 + .../types/hyperparameter_tuning_job.py | 182 + .../cloud/aiplatform_v1beta1/types/index.py | 142 + .../types/index_endpoint.py | 384 + .../types/index_endpoint_service.py | 419 + .../aiplatform_v1beta1/types/index_service.py | 362 + .../cloud/aiplatform_v1beta1/types/io.py | 198 + .../aiplatform_v1beta1/types/job_service.py | 1093 + .../aiplatform_v1beta1/types/job_state.py | 42 + .../types/lineage_subgraph.py | 62 + .../types/machine_resources.py | 347 + .../types/manual_batch_tuning_parameters.py | 50 + .../types/metadata_schema.py | 96 + .../types/metadata_service.py | 1479 + .../types/metadata_store.py | 100 + .../types/migratable_resource.py | 228 + .../types/migration_service.py | 479 + .../cloud/aiplatform_v1beta1/types/model.py | 800 + .../types/model_deployment_monitoring_job.py | 490 + .../types/model_evaluation.py | 150 + .../types/model_evaluation_slice.py | 110 + .../types/model_monitoring.py | 420 + .../aiplatform_v1beta1/types/model_service.py | 853 + .../aiplatform_v1beta1/types/operation.py | 83 + .../types/pipeline_failure_policy.py | 41 + .../aiplatform_v1beta1/types/pipeline_job.py | 574 + .../types/pipeline_service.py | 416 + .../types/pipeline_state.py | 40 + .../types/prediction_service.py | 284 + .../types/specialist_pool.py | 86 + .../types/specialist_pool_service.py | 237 + .../cloud/aiplatform_v1beta1/types/study.py | 963 + 
.../aiplatform_v1beta1/types/tensorboard.py | 131 + .../types/tensorboard_data.py | 205 + .../types/tensorboard_experiment.py | 115 + .../types/tensorboard_run.py | 112 + .../types/tensorboard_service.py | 1224 + .../types/tensorboard_time_series.py | 153 + .../types/training_pipeline.py | 652 + .../cloud/aiplatform_v1beta1/types/types.py | 86 + .../types/unmanaged_container_model.py | 62 + .../types/user_action_reference.py | 75 + .../cloud/aiplatform_v1beta1/types/value.py | 69 + .../types/vizier_service.py | 589 + owl-bot-staging/v1beta1/mypy.ini | 3 + owl-bot-staging/v1beta1/noxfile.py | 180 + ...ed_dataset_service_create_dataset_async.py | 55 + ...ted_dataset_service_create_dataset_sync.py | 55 + ...ed_dataset_service_delete_dataset_async.py | 49 + ...ted_dataset_service_delete_dataset_sync.py | 49 + ...rated_dataset_service_export_data_async.py | 53 + ...erated_dataset_service_export_data_sync.py | 53 + ...taset_service_get_annotation_spec_async.py | 45 + ...ataset_service_get_annotation_spec_sync.py | 45 + ...rated_dataset_service_get_dataset_async.py | 45 + ...erated_dataset_service_get_dataset_sync.py | 45 + ...rated_dataset_service_import_data_async.py | 54 + ...erated_dataset_service_import_data_sync.py | 54 + ..._dataset_service_list_annotations_async.py | 46 + ...d_dataset_service_list_annotations_sync.py | 46 + ...d_dataset_service_list_data_items_async.py | 46 + ...ed_dataset_service_list_data_items_sync.py | 46 + ...ted_dataset_service_list_datasets_async.py | 46 + ...ated_dataset_service_list_datasets_sync.py | 46 + ...ed_dataset_service_update_dataset_async.py | 50 + ...ted_dataset_service_update_dataset_sync.py | 50 + ..._endpoint_service_create_endpoint_async.py | 53 + ...d_endpoint_service_create_endpoint_sync.py | 53 + ..._endpoint_service_delete_endpoint_async.py | 49 + ...d_endpoint_service_delete_endpoint_sync.py | 49 + ...ted_endpoint_service_deploy_model_async.py | 54 + ...ated_endpoint_service_deploy_model_sync.py | 54 + 
...ted_endpoint_service_get_endpoint_async.py | 45 + ...ated_endpoint_service_get_endpoint_sync.py | 45 + ...d_endpoint_service_list_endpoints_async.py | 46 + ...ed_endpoint_service_list_endpoints_sync.py | 46 + ...d_endpoint_service_undeploy_model_async.py | 50 + ...ed_endpoint_service_undeploy_model_sync.py | 50 + ..._endpoint_service_update_endpoint_async.py | 48 + ...d_endpoint_service_update_endpoint_sync.py | 48 + ...rving_service_read_feature_values_async.py | 50 + ...erving_service_read_feature_values_sync.py | 50 + ...ice_streaming_read_feature_values_async.py | 51 + ...vice_streaming_read_feature_values_sync.py | 51 + ...ore_service_batch_create_features_async.py | 55 + ...tore_service_batch_create_features_sync.py | 55 + ...service_batch_read_feature_values_async.py | 62 + ..._service_batch_read_feature_values_sync.py | 62 + ...estore_service_create_entity_type_async.py | 50 + ...restore_service_create_entity_type_sync.py | 50 + ...aturestore_service_create_feature_async.py | 54 + ...eaturestore_service_create_feature_sync.py | 54 + ...store_service_create_featurestore_async.py | 50 + ...estore_service_create_featurestore_sync.py | 50 + ...estore_service_delete_entity_type_async.py | 49 + ...restore_service_delete_entity_type_sync.py | 49 + ...aturestore_service_delete_feature_async.py | 49 + ...eaturestore_service_delete_feature_sync.py | 49 + ...store_service_delete_featurestore_async.py | 49 + ...estore_service_delete_featurestore_sync.py | 49 + ...ore_service_export_feature_values_async.py | 57 + ...tore_service_export_feature_values_sync.py | 57 + ...turestore_service_get_entity_type_async.py | 45 + ...aturestore_service_get_entity_type_sync.py | 45 + ..._featurestore_service_get_feature_async.py | 45 + ...d_featurestore_service_get_feature_sync.py | 45 + ...urestore_service_get_featurestore_async.py | 45 + ...turestore_service_get_featurestore_sync.py | 45 + ...ore_service_import_feature_values_async.py | 58 + 
...tore_service_import_feature_values_sync.py | 58 + ...restore_service_list_entity_types_async.py | 46 + ...urestore_service_list_entity_types_sync.py | 46 + ...eaturestore_service_list_features_async.py | 46 + ...featurestore_service_list_features_sync.py | 46 + ...estore_service_list_featurestores_async.py | 46 + ...restore_service_list_featurestores_sync.py | 46 + ...turestore_service_search_features_async.py | 46 + ...aturestore_service_search_features_sync.py | 46 + ...estore_service_update_entity_type_async.py | 44 + ...restore_service_update_entity_type_sync.py | 44 + ...aturestore_service_update_feature_async.py | 48 + ...eaturestore_service_update_feature_sync.py | 48 + ...store_service_update_featurestore_async.py | 48 + ...estore_service_update_featurestore_sync.py | 48 + ...int_service_create_index_endpoint_async.py | 53 + ...oint_service_create_index_endpoint_sync.py | 53 + ...int_service_delete_index_endpoint_async.py | 49 + ...oint_service_delete_index_endpoint_sync.py | 49 + ...dex_endpoint_service_deploy_index_async.py | 54 + ...ndex_endpoint_service_deploy_index_sync.py | 54 + ...dpoint_service_get_index_endpoint_async.py | 45 + ...ndpoint_service_get_index_endpoint_sync.py | 45 + ...oint_service_list_index_endpoints_async.py | 46 + ...point_service_list_index_endpoints_sync.py | 46 + ...int_service_mutate_deployed_index_async.py | 54 + ...oint_service_mutate_deployed_index_sync.py | 54 + ...x_endpoint_service_undeploy_index_async.py | 50 + ...ex_endpoint_service_undeploy_index_sync.py | 50 + ...int_service_update_index_endpoint_async.py | 48 + ...oint_service_update_index_endpoint_sync.py | 48 + ...erated_index_service_create_index_async.py | 53 + ...nerated_index_service_create_index_sync.py | 53 + ...erated_index_service_delete_index_async.py | 49 + ...nerated_index_service_delete_index_sync.py | 49 + ...generated_index_service_get_index_async.py | 45 + ..._generated_index_service_get_index_sync.py | 45 + 
...erated_index_service_list_indexes_async.py | 46 + ...nerated_index_service_list_indexes_sync.py | 46 + ...erated_index_service_update_index_async.py | 52 + ...nerated_index_service_update_index_sync.py | 52 + ...rvice_cancel_batch_prediction_job_async.py | 43 + ...ervice_cancel_batch_prediction_job_sync.py | 43 + ...ted_job_service_cancel_custom_job_async.py | 43 + ...ated_job_service_cancel_custom_job_sync.py | 43 + ..._service_cancel_data_labeling_job_async.py | 43 + ...b_service_cancel_data_labeling_job_sync.py | 43 + ..._cancel_hyperparameter_tuning_job_async.py | 43 + ...e_cancel_hyperparameter_tuning_job_sync.py | 43 + ...rvice_create_batch_prediction_job_async.py | 53 + ...ervice_create_batch_prediction_job_sync.py | 53 + ...ted_job_service_create_custom_job_async.py | 50 + ...ated_job_service_create_custom_job_sync.py | 50 + ..._service_create_data_labeling_job_async.py | 54 + ...b_service_create_data_labeling_job_sync.py | 54 + ..._create_hyperparameter_tuning_job_async.py | 57 + ...e_create_hyperparameter_tuning_job_sync.py | 57 + ...e_model_deployment_monitoring_job_async.py | 50 + ...te_model_deployment_monitoring_job_sync.py | 50 + ...rvice_delete_batch_prediction_job_async.py | 49 + ...ervice_delete_batch_prediction_job_sync.py | 49 + ...ted_job_service_delete_custom_job_async.py | 49 + ...ated_job_service_delete_custom_job_sync.py | 49 + ..._service_delete_data_labeling_job_async.py | 49 + ...b_service_delete_data_labeling_job_sync.py | 49 + ..._delete_hyperparameter_tuning_job_async.py | 49 + ...e_delete_hyperparameter_tuning_job_sync.py | 49 + ...e_model_deployment_monitoring_job_async.py | 49 + ...te_model_deployment_monitoring_job_sync.py | 49 + ..._service_get_batch_prediction_job_async.py | 45 + ...b_service_get_batch_prediction_job_sync.py | 45 + ...erated_job_service_get_custom_job_async.py | 45 + ...nerated_job_service_get_custom_job_sync.py | 45 + ...job_service_get_data_labeling_job_async.py | 45 + 
..._job_service_get_data_labeling_job_sync.py | 45 + ...ice_get_hyperparameter_tuning_job_async.py | 45 + ...vice_get_hyperparameter_tuning_job_sync.py | 45 + ...t_model_deployment_monitoring_job_async.py | 45 + ...et_model_deployment_monitoring_job_sync.py | 45 + ...ervice_list_batch_prediction_jobs_async.py | 46 + ...service_list_batch_prediction_jobs_sync.py | 46 + ...ated_job_service_list_custom_jobs_async.py | 46 + ...rated_job_service_list_custom_jobs_sync.py | 46 + ...b_service_list_data_labeling_jobs_async.py | 46 + ...ob_service_list_data_labeling_jobs_sync.py | 46 + ...e_list_hyperparameter_tuning_jobs_async.py | 46 + ...ce_list_hyperparameter_tuning_jobs_sync.py | 46 + ..._model_deployment_monitoring_jobs_async.py | 46 + ...t_model_deployment_monitoring_jobs_sync.py | 46 + ...e_model_deployment_monitoring_job_async.py | 43 + ...se_model_deployment_monitoring_job_sync.py | 43 + ...e_model_deployment_monitoring_job_async.py | 43 + ...me_model_deployment_monitoring_job_sync.py | 43 + ...oyment_monitoring_stats_anomalies_async.py | 47 + ...loyment_monitoring_stats_anomalies_sync.py | 47 + ...e_model_deployment_monitoring_job_async.py | 53 + ...te_model_deployment_monitoring_job_sync.py | 53 + ..._context_artifacts_and_executions_async.py | 45 + ...d_context_artifacts_and_executions_sync.py | 45 + ...data_service_add_context_children_async.py | 45 + ...adata_service_add_context_children_sync.py | 45 + ...data_service_add_execution_events_async.py | 45 + ...adata_service_add_execution_events_sync.py | 45 + ..._metadata_service_create_artifact_async.py | 45 + ...d_metadata_service_create_artifact_sync.py | 45 + ...d_metadata_service_create_context_async.py | 45 + ...ed_metadata_service_create_context_sync.py | 45 + ...metadata_service_create_execution_async.py | 45 + ..._metadata_service_create_execution_sync.py | 45 + ...ta_service_create_metadata_schema_async.py | 49 + ...ata_service_create_metadata_schema_sync.py | 49 + 
...ata_service_create_metadata_store_async.py | 49 + ...data_service_create_metadata_store_sync.py | 49 + ..._metadata_service_delete_artifact_async.py | 49 + ...d_metadata_service_delete_artifact_sync.py | 49 + ...d_metadata_service_delete_context_async.py | 49 + ...ed_metadata_service_delete_context_sync.py | 49 + ...metadata_service_delete_execution_async.py | 49 + ..._metadata_service_delete_execution_sync.py | 49 + ...ata_service_delete_metadata_store_async.py | 49 + ...data_service_delete_metadata_store_sync.py | 49 + ...ted_metadata_service_get_artifact_async.py | 45 + ...ated_metadata_service_get_artifact_sync.py | 45 + ...ated_metadata_service_get_context_async.py | 45 + ...rated_metadata_service_get_context_sync.py | 45 + ...ed_metadata_service_get_execution_async.py | 45 + ...ted_metadata_service_get_execution_sync.py | 45 + ...adata_service_get_metadata_schema_async.py | 45 + ...tadata_service_get_metadata_schema_sync.py | 45 + ...tadata_service_get_metadata_store_async.py | 45 + ...etadata_service_get_metadata_store_sync.py | 45 + ...d_metadata_service_list_artifacts_async.py | 46 + ...ed_metadata_service_list_artifacts_sync.py | 46 + ...ed_metadata_service_list_contexts_async.py | 46 + ...ted_metadata_service_list_contexts_sync.py | 46 + ..._metadata_service_list_executions_async.py | 46 + ...d_metadata_service_list_executions_sync.py | 46 + ...ata_service_list_metadata_schemas_async.py | 46 + ...data_service_list_metadata_schemas_sync.py | 46 + ...data_service_list_metadata_stores_async.py | 46 + ...adata_service_list_metadata_stores_sync.py | 46 + ..._metadata_service_purge_artifacts_async.py | 50 + ...d_metadata_service_purge_artifacts_sync.py | 50 + ...d_metadata_service_purge_contexts_async.py | 50 + ...ed_metadata_service_purge_contexts_sync.py | 50 + ...metadata_service_purge_executions_async.py | 50 + ..._metadata_service_purge_executions_sync.py | 50 + ...e_query_artifact_lineage_subgraph_async.py | 45 + 
...ce_query_artifact_lineage_subgraph_sync.py | 45 + ...ce_query_context_lineage_subgraph_async.py | 45 + ...ice_query_context_lineage_subgraph_sync.py | 45 + ...uery_execution_inputs_and_outputs_async.py | 45 + ...query_execution_inputs_and_outputs_sync.py | 45 + ..._metadata_service_update_artifact_async.py | 44 + ...d_metadata_service_update_artifact_sync.py | 44 + ...d_metadata_service_update_context_async.py | 44 + ...ed_metadata_service_update_context_sync.py | 44 + ...metadata_service_update_execution_async.py | 44 + ..._metadata_service_update_execution_sync.py | 44 + ...n_service_batch_migrate_resources_async.py | 55 + ...on_service_batch_migrate_resources_sync.py | 55 + ...rvice_search_migratable_resources_async.py | 46 + ...ervice_search_migratable_resources_sync.py | 46 + ...erated_model_service_delete_model_async.py | 49 + ...nerated_model_service_delete_model_sync.py | 49 + ...odel_service_delete_model_version_async.py | 49 + ...model_service_delete_model_version_sync.py | 49 + ...erated_model_service_export_model_async.py | 49 + ...nerated_model_service_export_model_sync.py | 49 + ...generated_model_service_get_model_async.py | 45 + ...odel_service_get_model_evaluation_async.py | 45 + ...ervice_get_model_evaluation_slice_async.py | 45 + ...service_get_model_evaluation_slice_sync.py | 45 + ...model_service_get_model_evaluation_sync.py | 45 + ..._generated_model_service_get_model_sync.py | 45 + ...l_service_import_model_evaluation_async.py | 45 + ...el_service_import_model_evaluation_sync.py | 45 + ...vice_list_model_evaluation_slices_async.py | 46 + ...rvice_list_model_evaluation_slices_sync.py | 46 + ...el_service_list_model_evaluations_async.py | 46 + ...del_service_list_model_evaluations_sync.py | 46 + ...model_service_list_model_versions_async.py | 46 + ..._model_service_list_model_versions_sync.py | 46 + ...nerated_model_service_list_models_async.py | 46 + ...enerated_model_service_list_models_sync.py | 46 + 
...del_service_merge_version_aliases_async.py | 46 + ...odel_service_merge_version_aliases_sync.py | 46 + ...ervice_update_explanation_dataset_async.py | 49 + ...service_update_explanation_dataset_sync.py | 49 + ...erated_model_service_update_model_async.py | 48 + ...nerated_model_service_update_model_sync.py | 48 + ...erated_model_service_upload_model_async.py | 53 + ...nerated_model_service_upload_model_sync.py | 53 + ...eline_service_cancel_pipeline_job_async.py | 43 + ...peline_service_cancel_pipeline_job_sync.py | 43 + ..._service_cancel_training_pipeline_async.py | 43 + ...e_service_cancel_training_pipeline_sync.py | 43 + ...eline_service_create_pipeline_job_async.py | 45 + ...peline_service_create_pipeline_job_sync.py | 45 + ..._service_create_training_pipeline_async.py | 51 + ...e_service_create_training_pipeline_sync.py | 51 + ...eline_service_delete_pipeline_job_async.py | 49 + ...peline_service_delete_pipeline_job_sync.py | 49 + ..._service_delete_training_pipeline_async.py | 49 + ...e_service_delete_training_pipeline_sync.py | 49 + ...pipeline_service_get_pipeline_job_async.py | 45 + ..._pipeline_service_get_pipeline_job_sync.py | 45 + ...ine_service_get_training_pipeline_async.py | 45 + ...line_service_get_training_pipeline_sync.py | 45 + ...peline_service_list_pipeline_jobs_async.py | 46 + ...ipeline_service_list_pipeline_jobs_sync.py | 46 + ...e_service_list_training_pipelines_async.py | 46 + ...ne_service_list_training_pipelines_sync.py | 46 + ...erated_prediction_service_explain_async.py | 49 + ...nerated_prediction_service_explain_sync.py | 49 + ...erated_prediction_service_predict_async.py | 49 + ...nerated_prediction_service_predict_sync.py | 49 + ...ed_prediction_service_raw_predict_async.py | 45 + ...ted_prediction_service_raw_predict_sync.py | 45 + ...ol_service_create_specialist_pool_async.py | 54 + ...ool_service_create_specialist_pool_sync.py | 54 + ...ol_service_delete_specialist_pool_async.py | 49 + 
...ool_service_delete_specialist_pool_sync.py | 49 + ..._pool_service_get_specialist_pool_async.py | 45 + ...t_pool_service_get_specialist_pool_sync.py | 45 + ...ool_service_list_specialist_pools_async.py | 46 + ...pool_service_list_specialist_pools_sync.py | 46 + ...ol_service_update_specialist_pool_async.py | 53 + ...ool_service_update_specialist_pool_sync.py | 53 + ...ice_batch_create_tensorboard_runs_async.py | 51 + ...vice_batch_create_tensorboard_runs_sync.py | 51 + ...ch_create_tensorboard_time_series_async.py | 51 + ...tch_create_tensorboard_time_series_sync.py | 51 + ...read_tensorboard_time_series_data_async.py | 46 + ..._read_tensorboard_time_series_data_sync.py | 46 + ...rboard_service_create_tensorboard_async.py | 53 + ...ice_create_tensorboard_experiment_async.py | 46 + ...vice_create_tensorboard_experiment_sync.py | 46 + ...rd_service_create_tensorboard_run_async.py | 50 + ...ard_service_create_tensorboard_run_sync.py | 50 + ...orboard_service_create_tensorboard_sync.py | 53 + ...ce_create_tensorboard_time_series_async.py | 50 + ...ice_create_tensorboard_time_series_sync.py | 50 + ...rboard_service_delete_tensorboard_async.py | 49 + ...ice_delete_tensorboard_experiment_async.py | 49 + ...vice_delete_tensorboard_experiment_sync.py | 49 + ...rd_service_delete_tensorboard_run_async.py | 49 + ...ard_service_delete_tensorboard_run_sync.py | 49 + ...orboard_service_delete_tensorboard_sync.py | 49 + ...ce_delete_tensorboard_time_series_async.py | 49 + ...ice_delete_tensorboard_time_series_sync.py | 49 + ...port_tensorboard_time_series_data_async.py | 46 + ...xport_tensorboard_time_series_data_sync.py | 46 + ...nsorboard_service_get_tensorboard_async.py | 45 + ...ervice_get_tensorboard_experiment_async.py | 45 + ...service_get_tensorboard_experiment_sync.py | 45 + ...board_service_get_tensorboard_run_async.py | 45 + ...rboard_service_get_tensorboard_run_sync.py | 45 + ...ensorboard_service_get_tensorboard_sync.py | 45 + 
...rvice_get_tensorboard_time_series_async.py | 45 + ...ervice_get_tensorboard_time_series_sync.py | 45 + ...vice_list_tensorboard_experiments_async.py | 46 + ...rvice_list_tensorboard_experiments_sync.py | 46 + ...ard_service_list_tensorboard_runs_async.py | 46 + ...oard_service_list_tensorboard_runs_sync.py | 46 + ...vice_list_tensorboard_time_series_async.py | 46 + ...rvice_list_tensorboard_time_series_sync.py | 46 + ...orboard_service_list_tensorboards_async.py | 46 + ...sorboard_service_list_tensorboards_sync.py | 46 + ...ervice_read_tensorboard_blob_data_async.py | 46 + ...service_read_tensorboard_blob_data_sync.py | 46 + ...read_tensorboard_time_series_data_async.py | 45 + ..._read_tensorboard_time_series_data_sync.py | 45 + ...rboard_service_update_tensorboard_async.py | 52 + ...ice_update_tensorboard_experiment_async.py | 44 + ...vice_update_tensorboard_experiment_sync.py | 44 + ...rd_service_update_tensorboard_run_async.py | 48 + ...ard_service_update_tensorboard_run_sync.py | 48 + ...orboard_service_update_tensorboard_sync.py | 52 + ...ce_update_tensorboard_time_series_async.py | 49 + ...ice_update_tensorboard_time_series_sync.py | 49 + ...write_tensorboard_experiment_data_async.py | 51 + ..._write_tensorboard_experiment_data_sync.py | 51 + ...ervice_write_tensorboard_run_data_async.py | 50 + ...service_write_tensorboard_run_data_sync.py | 50 + ...ier_service_add_trial_measurement_async.py | 45 + ...zier_service_add_trial_measurement_sync.py | 45 + ..._check_trial_early_stopping_state_async.py | 49 + ...e_check_trial_early_stopping_state_sync.py | 49 + ...ted_vizier_service_complete_trial_async.py | 45 + ...ated_vizier_service_complete_trial_sync.py | 45 + ...rated_vizier_service_create_study_async.py | 54 + ...erated_vizier_service_create_study_sync.py | 54 + ...rated_vizier_service_create_trial_async.py | 45 + ...erated_vizier_service_create_trial_sync.py | 45 + ...rated_vizier_service_delete_study_async.py | 43 + 
...erated_vizier_service_delete_study_sync.py | 43 + ...rated_vizier_service_delete_trial_async.py | 43 + ...erated_vizier_service_delete_trial_sync.py | 43 + ...enerated_vizier_service_get_study_async.py | 45 + ...generated_vizier_service_get_study_sync.py | 45 + ...enerated_vizier_service_get_trial_async.py | 45 + ...generated_vizier_service_get_trial_sync.py | 45 + ...izier_service_list_optimal_trials_async.py | 45 + ...vizier_service_list_optimal_trials_sync.py | 45 + ...rated_vizier_service_list_studies_async.py | 46 + ...erated_vizier_service_list_studies_sync.py | 46 + ...erated_vizier_service_list_trials_async.py | 46 + ...nerated_vizier_service_list_trials_sync.py | 46 + ...rated_vizier_service_lookup_study_async.py | 46 + ...erated_vizier_service_lookup_study_sync.py | 46 + ...nerated_vizier_service_stop_trial_async.py | 45 + ...enerated_vizier_service_stop_trial_sync.py | 45 + ...ted_vizier_service_suggest_trials_async.py | 51 + ...ated_vizier_service_suggest_trials_sync.py | 51 + .../snippet_metadata_aiplatform_v1beta1.json | 31047 ++++++++++++++++ .../fixup_aiplatform_v1beta1_keywords.py | 364 + .../fixup_definition_v1beta1_keywords.py | 175 + .../fixup_instance_v1beta1_keywords.py | 175 + .../scripts/fixup_params_v1beta1_keywords.py | 175 + .../fixup_prediction_v1beta1_keywords.py | 175 + owl-bot-staging/v1beta1/setup.py | 59 + owl-bot-staging/v1beta1/tests/__init__.py | 16 + .../v1beta1/tests/unit/__init__.py | 16 + .../v1beta1/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/aiplatform_v1beta1/__init__.py | 16 + .../test_dataset_service.py | 5583 +++ .../test_endpoint_service.py | 4536 +++ ...est_featurestore_online_serving_service.py | 2953 ++ .../test_featurestore_service.py | 8174 ++++ .../test_index_endpoint_service.py | 4694 +++ .../aiplatform_v1beta1/test_index_service.py | 3945 ++ .../aiplatform_v1beta1/test_job_service.py | 10674 ++++++ .../test_metadata_service.py | 11200 ++++++ .../test_migration_service.py | 3321 ++ 
.../aiplatform_v1beta1/test_model_service.py | 7099 ++++ .../test_pipeline_service.py | 5529 +++ .../test_prediction_service.py | 3307 ++ .../test_specialist_pool_service.py | 3925 ++ .../test_tensorboard_service.py | 10365 ++++++ .../aiplatform_v1beta1/test_vizier_service.py | 6176 +++ .../unit/gapic/definition_v1beta1/__init__.py | 16 + .../unit/gapic/instance_v1beta1/__init__.py | 16 + .../unit/gapic/params_v1beta1/__init__.py | 16 + .../unit/gapic/prediction_v1beta1/__init__.py | 16 + 1406 files changed, 564449 insertions(+) create mode 100644 owl-bot-staging/v1/.coveragerc create mode 100644 owl-bot-staging/v1/.flake8 create mode 100644 owl-bot-staging/v1/MANIFEST.in create mode 100644 owl-bot-staging/v1/README.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst create mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/types.rst create 
mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst create mode 100644 owl-bot-staging/v1/docs/conf.py create mode 100644 owl-bot-staging/v1/docs/definition_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/definition_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/index.rst create mode 100644 owl-bot-staging/v1/docs/instance_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/instance_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/params_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/params_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/prediction_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/prediction_v1/types.rst create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py 
create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py create 
mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/pagers.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py create mode 
100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_monitoring.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py 
create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_failure_policy.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py create mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py create mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py create mode 100644 owl-bot-staging/v1/mypy.ini create mode 100644 owl-bot-staging/v1/noxfile.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py create 
mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_training_pipeline_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_list_training_pipelines_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_list_training_pipelines_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_explain_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_explain_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_predict_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_predict_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_raw_predict_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_prediction_service_raw_predict_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py create mode 
100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py create mode 
100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_sync.py create mode 100644 
owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_async.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_sync.py create mode 100644 owl-bot-staging/v1/samples/generated_samples/snippet_metadata_aiplatform_v1.json create mode 100644 owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py create mode 100644 owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py create mode 100644 owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py create mode 100644 owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py create mode 100644 owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py create mode 100644 owl-bot-staging/v1/setup.py create mode 100644 owl-bot-staging/v1/tests/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py create mode 100644 
owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py create mode 100644 owl-bot-staging/v1beta1/.coveragerc create mode 100644 owl-bot-staging/v1beta1/.flake8 create mode 100644 owl-bot-staging/v1beta1/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/README.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst create mode 100644 
owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst create mode 100644 
owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py 
create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py 
create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py create mode 
100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py create mode 
100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_failure_policy.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py create mode 100644 owl-bot-staging/v1beta1/mypy.ini create mode 100644 owl-bot-staging/v1beta1/noxfile.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py 
create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py create mode 
100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_sync.py create mode 100644 
owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py create mode 100644 owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json create mode 100644 
owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/setup.py create mode 100644 owl-bot-staging/v1beta1/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py create mode 100644 
owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc new file mode 100644 index 0000000000..01d28d4b2c --- /dev/null +++ b/owl-bot-staging/v1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/.flake8 b/owl-bot-staging/v1/.flake8 new file mode 100644 index 0000000000..29227d4cf4 --- /dev/null +++ b/owl-bot-staging/v1/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in new file mode 100644 index 0000000000..d55f1f202e --- /dev/null +++ b/owl-bot-staging/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition *.py +recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst new file mode 100644 index 0000000000..ad49c55e02 --- /dev/null +++ b/owl-bot-staging/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Aiplatform V1 Schema Trainingjob Definition API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Aiplatform V1 Schema Trainingjob Definition API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. 
_Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv + source /bin/activate + /bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv + \Scripts\activate + \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst new file mode 100644 index 0000000000..79ddc4623f --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst @@ -0,0 +1,10 @@ +DatasetService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.dataset_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.dataset_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst new file mode 100644 index 0000000000..3b900f851e --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst @@ -0,0 +1,10 @@ +EndpointService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst new file mode 100644 index 0000000000..ace5b9dd1a --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst @@ -0,0 +1,6 @@ +FeaturestoreOnlineServingService +-------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_online_serving_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst new file mode 100644 index 0000000000..90a303a4c4 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst @@ -0,0 +1,10 @@ +FeaturestoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.featurestore_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst new file mode 100644 index 0000000000..9a87b81082 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst @@ -0,0 +1,10 @@ +IndexEndpointService +-------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.index_endpoint_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.index_endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst new file mode 100644 index 0000000000..b07b444c23 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst @@ -0,0 +1,10 @@ +IndexService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1.services.index_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.index_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst new file mode 100644 index 0000000000..6afcbbb4d0 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst @@ -0,0 +1,10 @@ +JobService +---------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.job_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.job_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst new file mode 100644 index 0000000000..419fd0a850 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst @@ -0,0 +1,10 @@ +MetadataService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.metadata_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.metadata_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst new file mode 100644 index 0000000000..ac0a5fb3aa --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst @@ -0,0 +1,10 @@ +MigrationService +---------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.migration_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.migration_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst new file mode 100644 index 0000000000..8baab43cbc --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst @@ -0,0 +1,10 @@ +ModelService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1.services.model_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.model_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst new file mode 100644 index 0000000000..bbf6b32092 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst @@ -0,0 +1,10 @@ +PipelineService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.pipeline_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst new file mode 100644 index 0000000000..fdda504879 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.prediction_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst new file mode 100644 index 0000000000..0a6443a972 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst @@ -0,0 +1,20 @@ +Services for Google Cloud Aiplatform v1 API +=========================================== +.. toctree:: + :maxdepth: 2 + + dataset_service + endpoint_service + featurestore_online_serving_service + featurestore_service + index_endpoint_service + index_service + job_service + metadata_service + migration_service + model_service + pipeline_service + prediction_service + specialist_pool_service + tensorboard_service + vizier_service diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst new file mode 100644 index 0000000000..4a6f288894 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst @@ -0,0 +1,10 @@ +SpecialistPoolService +--------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst new file mode 100644 index 0000000000..0fa17e10b8 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst @@ -0,0 +1,10 @@ +TensorboardService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1.services.tensorboard_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.tensorboard_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst new file mode 100644 index 0000000000..ad4454843f --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform v1 API +======================================== + +.. automodule:: google.cloud.aiplatform_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst new file mode 100644 index 0000000000..efdbafe3c8 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst @@ -0,0 +1,10 @@ +VizierService +------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.vizier_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.vizier_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py new file mode 100644 index 0000000000..3181d4edca --- /dev/null +++ b/owl-bot-staging/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-aiplatform-v1-schema-trainingjob-definition documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = u"google-cloud-aiplatform-v1-schema-trainingjob-definition" +copyright = u"2022, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. 
+# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Trainingjob Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. 
If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1-schema-trainingjob-definition-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-cloud-aiplatform-v1-schema-trainingjob-definition.tex", + u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-aiplatform-v1-schema-trainingjob-definition", + u"Google Cloud Aiplatform V1 Schema Trainingjob Definition Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-aiplatform-v1-schema-trainingjob-definition", + u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", + author, + "google-cloud-aiplatform-v1-schema-trainingjob-definition", + "GAPIC library for Google Cloud Aiplatform V1 Schema Trainingjob Definition API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git 
a/owl-bot-staging/v1/docs/definition_v1/services.rst b/owl-bot-staging/v1/docs/definition_v1/services.rst new file mode 100644 index 0000000000..ba6b1940e8 --- /dev/null +++ b/owl-bot-staging/v1/docs/definition_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API +============================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/definition_v1/types.rst b/owl-bot-staging/v1/docs/definition_v1/types.rst new file mode 100644 index 0000000000..a1df2bce25 --- /dev/null +++ b/owl-bot-staging/v1/docs/definition_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API +========================================================================= + +.. automodule:: google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 0000000000..ad6ae57609 --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + definition_v1/services + definition_v1/types diff --git a/owl-bot-staging/v1/docs/instance_v1/services.rst b/owl-bot-staging/v1/docs/instance_v1/services.rst new file mode 100644 index 0000000000..50c011c69a --- /dev/null +++ b/owl-bot-staging/v1/docs/instance_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +====================================================================== +.. 
toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/instance_v1/types.rst b/owl-bot-staging/v1/docs/instance_v1/types.rst new file mode 100644 index 0000000000..564ab013ee --- /dev/null +++ b/owl-bot-staging/v1/docs/instance_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +=================================================================== + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/params_v1/services.rst b/owl-bot-staging/v1/docs/params_v1/services.rst new file mode 100644 index 0000000000..bf08ea6e98 --- /dev/null +++ b/owl-bot-staging/v1/docs/params_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API +==================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/params_v1/types.rst b/owl-bot-staging/v1/docs/params_v1/types.rst new file mode 100644 index 0000000000..956ef5224d --- /dev/null +++ b/owl-bot-staging/v1/docs/params_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API +================================================================= + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/prediction_v1/services.rst b/owl-bot-staging/v1/docs/prediction_v1/services.rst new file mode 100644 index 0000000000..ad6f034387 --- /dev/null +++ b/owl-bot-staging/v1/docs/prediction_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API +======================================================================== +.. 
toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/prediction_v1/types.rst b/owl-bot-staging/v1/docs/prediction_v1/types.rst new file mode 100644 index 0000000000..a97faf34de --- /dev/null +++ b/owl-bot-staging/v1/docs/prediction_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API +===================================================================== + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py new file mode 100644 index 0000000000..fb76121336 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py @@ -0,0 +1,953 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform_v1.services.dataset_service.client import DatasetServiceClient +from google.cloud.aiplatform_v1.services.dataset_service.async_client import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1.services.endpoint_service.client import EndpointServiceClient +from google.cloud.aiplatform_v1.services.endpoint_service.async_client import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient +from google.cloud.aiplatform_v1.services.featurestore_service.client import FeaturestoreServiceClient +from google.cloud.aiplatform_v1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1.services.index_endpoint_service.client import IndexEndpointServiceClient +from google.cloud.aiplatform_v1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.index_service.client import IndexServiceClient +from google.cloud.aiplatform_v1.services.index_service.async_client import IndexServiceAsyncClient +from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient +from google.cloud.aiplatform_v1.services.job_service.async_client import JobServiceAsyncClient +from google.cloud.aiplatform_v1.services.metadata_service.client import MetadataServiceClient +from google.cloud.aiplatform_v1.services.metadata_service.async_client import MetadataServiceAsyncClient +from google.cloud.aiplatform_v1.services.migration_service.client import MigrationServiceClient +from google.cloud.aiplatform_v1.services.migration_service.async_client import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1.services.model_service.client import ModelServiceClient +from 
google.cloud.aiplatform_v1.services.model_service.async_client import ModelServiceAsyncClient +from google.cloud.aiplatform_v1.services.pipeline_service.client import PipelineServiceClient +from google.cloud.aiplatform_v1.services.pipeline_service.async_client import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1.services.prediction_service.client import PredictionServiceClient +from google.cloud.aiplatform_v1.services.prediction_service.async_client import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1.services.specialist_pool_service.client import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1.services.tensorboard_service.client import TensorboardServiceClient +from google.cloud.aiplatform_v1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1.services.vizier_service.client import VizierServiceClient +from google.cloud.aiplatform_v1.services.vizier_service.async_client import VizierServiceAsyncClient + +from google.cloud.aiplatform_v1.types.accelerator_type import AcceleratorType +from google.cloud.aiplatform_v1.types.annotation import Annotation +from google.cloud.aiplatform_v1.types.annotation_spec import AnnotationSpec +from google.cloud.aiplatform_v1.types.artifact import Artifact +from google.cloud.aiplatform_v1.types.batch_prediction_job import BatchPredictionJob +from google.cloud.aiplatform_v1.types.completion_stats import CompletionStats +from google.cloud.aiplatform_v1.types.context import Context +from google.cloud.aiplatform_v1.types.custom_job import ContainerSpec +from google.cloud.aiplatform_v1.types.custom_job import CustomJob +from google.cloud.aiplatform_v1.types.custom_job import CustomJobSpec +from google.cloud.aiplatform_v1.types.custom_job import PythonPackageSpec +from google.cloud.aiplatform_v1.types.custom_job import 
Scheduling +from google.cloud.aiplatform_v1.types.custom_job import WorkerPoolSpec +from google.cloud.aiplatform_v1.types.data_item import DataItem +from google.cloud.aiplatform_v1.types.data_labeling_job import ActiveLearningConfig +from google.cloud.aiplatform_v1.types.data_labeling_job import DataLabelingJob +from google.cloud.aiplatform_v1.types.data_labeling_job import SampleConfig +from google.cloud.aiplatform_v1.types.data_labeling_job import TrainingConfig +from google.cloud.aiplatform_v1.types.dataset import Dataset +from google.cloud.aiplatform_v1.types.dataset import ExportDataConfig +from google.cloud.aiplatform_v1.types.dataset import ImportDataConfig +from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import DeleteDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataRequest +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataResponse +from google.cloud.aiplatform_v1.types.dataset_service import GetAnnotationSpecRequest +from google.cloud.aiplatform_v1.types.dataset_service import GetDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataRequest +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataResponse +from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsResponse +from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsResponse +from google.cloud.aiplatform_v1.types.dataset_service 
import ListDatasetsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsResponse +from google.cloud.aiplatform_v1.types.dataset_service import UpdateDatasetRequest +from google.cloud.aiplatform_v1.types.deployed_index_ref import DeployedIndexRef +from google.cloud.aiplatform_v1.types.deployed_model_ref import DeployedModelRef +from google.cloud.aiplatform_v1.types.encryption_spec import EncryptionSpec +from google.cloud.aiplatform_v1.types.endpoint import DeployedModel +from google.cloud.aiplatform_v1.types.endpoint import Endpoint +from google.cloud.aiplatform_v1.types.endpoint import PredictRequestResponseLoggingConfig +from google.cloud.aiplatform_v1.types.endpoint import PrivateEndpoints +from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeleteEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelResponse +from google.cloud.aiplatform_v1.types.endpoint_service import GetEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsRequest +from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsResponse +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelRequest +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelResponse +from google.cloud.aiplatform_v1.types.endpoint_service import UpdateEndpointRequest +from google.cloud.aiplatform_v1.types.entity_type import EntityType +from google.cloud.aiplatform_v1.types.env_var import EnvVar +from 
google.cloud.aiplatform_v1.types.event import Event +from google.cloud.aiplatform_v1.types.execution import Execution +from google.cloud.aiplatform_v1.types.explanation import Attribution +from google.cloud.aiplatform_v1.types.explanation import BlurBaselineConfig +from google.cloud.aiplatform_v1.types.explanation import ExamplesOverride +from google.cloud.aiplatform_v1.types.explanation import ExamplesRestrictionsNamespace +from google.cloud.aiplatform_v1.types.explanation import Explanation +from google.cloud.aiplatform_v1.types.explanation import ExplanationMetadataOverride +from google.cloud.aiplatform_v1.types.explanation import ExplanationParameters +from google.cloud.aiplatform_v1.types.explanation import ExplanationSpec +from google.cloud.aiplatform_v1.types.explanation import ExplanationSpecOverride +from google.cloud.aiplatform_v1.types.explanation import FeatureNoiseSigma +from google.cloud.aiplatform_v1.types.explanation import IntegratedGradientsAttribution +from google.cloud.aiplatform_v1.types.explanation import ModelExplanation +from google.cloud.aiplatform_v1.types.explanation import Neighbor +from google.cloud.aiplatform_v1.types.explanation import SampledShapleyAttribution +from google.cloud.aiplatform_v1.types.explanation import SmoothGradConfig +from google.cloud.aiplatform_v1.types.explanation import XraiAttribution +from google.cloud.aiplatform_v1.types.explanation_metadata import ExplanationMetadata +from google.cloud.aiplatform_v1.types.feature import Feature +from google.cloud.aiplatform_v1.types.feature_monitoring_stats import FeatureStatsAnomaly +from google.cloud.aiplatform_v1.types.feature_selector import FeatureSelector +from google.cloud.aiplatform_v1.types.feature_selector import IdMatcher +from google.cloud.aiplatform_v1.types.featurestore import Featurestore +from google.cloud.aiplatform_v1.types.featurestore_monitoring import FeaturestoreMonitoringConfig +from google.cloud.aiplatform_v1.types.featurestore_online_service import 
FeatureValue +from google.cloud.aiplatform_v1.types.featurestore_online_service import FeatureValueList +from google.cloud.aiplatform_v1.types.featurestore_online_service import ReadFeatureValuesRequest +from google.cloud.aiplatform_v1.types.featurestore_online_service import ReadFeatureValuesResponse +from google.cloud.aiplatform_v1.types.featurestore_online_service import StreamingReadFeatureValuesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import CreateEntityTypeOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import CreateEntityTypeRequest +from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeatureOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeatureRequest +from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeaturestoreOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeaturestoreRequest +from google.cloud.aiplatform_v1.types.featurestore_service import DeleteEntityTypeRequest +from google.cloud.aiplatform_v1.types.featurestore_service import DeleteFeatureRequest +from google.cloud.aiplatform_v1.types.featurestore_service import DeleteFeaturestoreRequest +from google.cloud.aiplatform_v1.types.featurestore_service import DestinationFeatureSetting +from 
google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import FeatureValueDestination +from google.cloud.aiplatform_v1.types.featurestore_service import GetEntityTypeRequest +from google.cloud.aiplatform_v1.types.featurestore_service import GetFeatureRequest +from google.cloud.aiplatform_v1.types.featurestore_service import GetFeaturestoreRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import ListEntityTypesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ListEntityTypesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturestoresRequest +from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturestoresResponse +from google.cloud.aiplatform_v1.types.featurestore_service import SearchFeaturesRequest +from google.cloud.aiplatform_v1.types.featurestore_service import SearchFeaturesResponse +from google.cloud.aiplatform_v1.types.featurestore_service import UpdateEntityTypeRequest +from google.cloud.aiplatform_v1.types.featurestore_service import UpdateFeatureRequest +from google.cloud.aiplatform_v1.types.featurestore_service import UpdateFeaturestoreOperationMetadata +from google.cloud.aiplatform_v1.types.featurestore_service 
import UpdateFeaturestoreRequest +from google.cloud.aiplatform_v1.types.hyperparameter_tuning_job import HyperparameterTuningJob +from google.cloud.aiplatform_v1.types.index import Index +from google.cloud.aiplatform_v1.types.index_endpoint import DeployedIndex +from google.cloud.aiplatform_v1.types.index_endpoint import DeployedIndexAuthConfig +from google.cloud.aiplatform_v1.types.index_endpoint import IndexEndpoint +from google.cloud.aiplatform_v1.types.index_endpoint import IndexPrivateEndpoints +from google.cloud.aiplatform_v1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from google.cloud.aiplatform_v1.types.index_endpoint_service import CreateIndexEndpointRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import DeleteIndexEndpointRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexOperationMetadata +from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexResponse +from google.cloud.aiplatform_v1.types.index_endpoint_service import GetIndexEndpointRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import ListIndexEndpointsRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import ListIndexEndpointsResponse +from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexResponse +from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexOperationMetadata +from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexRequest +from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexResponse +from 
google.cloud.aiplatform_v1.types.index_endpoint_service import UpdateIndexEndpointRequest +from google.cloud.aiplatform_v1.types.index_service import CreateIndexOperationMetadata +from google.cloud.aiplatform_v1.types.index_service import CreateIndexRequest +from google.cloud.aiplatform_v1.types.index_service import DeleteIndexRequest +from google.cloud.aiplatform_v1.types.index_service import GetIndexRequest +from google.cloud.aiplatform_v1.types.index_service import ListIndexesRequest +from google.cloud.aiplatform_v1.types.index_service import ListIndexesResponse +from google.cloud.aiplatform_v1.types.index_service import NearestNeighborSearchOperationMetadata +from google.cloud.aiplatform_v1.types.index_service import UpdateIndexOperationMetadata +from google.cloud.aiplatform_v1.types.index_service import UpdateIndexRequest +from google.cloud.aiplatform_v1.types.io import AvroSource +from google.cloud.aiplatform_v1.types.io import BigQueryDestination +from google.cloud.aiplatform_v1.types.io import BigQuerySource +from google.cloud.aiplatform_v1.types.io import ContainerRegistryDestination +from google.cloud.aiplatform_v1.types.io import CsvDestination +from google.cloud.aiplatform_v1.types.io import CsvSource +from google.cloud.aiplatform_v1.types.io import GcsDestination +from google.cloud.aiplatform_v1.types.io import GcsSource +from google.cloud.aiplatform_v1.types.io import TFRecordDestination +from google.cloud.aiplatform_v1.types.job_service import CancelBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service 
import CreateDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListModelDeploymentMonitoringJobsRequest +from google.cloud.aiplatform_v1.types.job_service import 
ListModelDeploymentMonitoringJobsResponse +from google.cloud.aiplatform_v1.types.job_service import PauseModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_service import ResumeModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from google.cloud.aiplatform_v1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from google.cloud.aiplatform_v1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from google.cloud.aiplatform_v1.types.job_service import UpdateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1.types.job_state import JobState +from google.cloud.aiplatform_v1.types.lineage_subgraph import LineageSubgraph +from google.cloud.aiplatform_v1.types.machine_resources import AutomaticResources +from google.cloud.aiplatform_v1.types.machine_resources import AutoscalingMetricSpec +from google.cloud.aiplatform_v1.types.machine_resources import BatchDedicatedResources +from google.cloud.aiplatform_v1.types.machine_resources import DedicatedResources +from google.cloud.aiplatform_v1.types.machine_resources import DiskSpec +from google.cloud.aiplatform_v1.types.machine_resources import MachineSpec +from google.cloud.aiplatform_v1.types.machine_resources import NfsMount +from google.cloud.aiplatform_v1.types.machine_resources import ResourcesConsumed +from google.cloud.aiplatform_v1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from google.cloud.aiplatform_v1.types.metadata_schema import MetadataSchema +from google.cloud.aiplatform_v1.types.metadata_service import AddContextArtifactsAndExecutionsRequest +from google.cloud.aiplatform_v1.types.metadata_service import AddContextArtifactsAndExecutionsResponse +from google.cloud.aiplatform_v1.types.metadata_service import AddContextChildrenRequest +from google.cloud.aiplatform_v1.types.metadata_service 
import AddContextChildrenResponse +from google.cloud.aiplatform_v1.types.metadata_service import AddExecutionEventsRequest +from google.cloud.aiplatform_v1.types.metadata_service import AddExecutionEventsResponse +from google.cloud.aiplatform_v1.types.metadata_service import CreateArtifactRequest +from google.cloud.aiplatform_v1.types.metadata_service import CreateContextRequest +from google.cloud.aiplatform_v1.types.metadata_service import CreateExecutionRequest +from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataSchemaRequest +from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataStoreRequest +from google.cloud.aiplatform_v1.types.metadata_service import DeleteArtifactRequest +from google.cloud.aiplatform_v1.types.metadata_service import DeleteContextRequest +from google.cloud.aiplatform_v1.types.metadata_service import DeleteExecutionRequest +from google.cloud.aiplatform_v1.types.metadata_service import DeleteMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1.types.metadata_service import DeleteMetadataStoreRequest +from google.cloud.aiplatform_v1.types.metadata_service import GetArtifactRequest +from google.cloud.aiplatform_v1.types.metadata_service import GetContextRequest +from google.cloud.aiplatform_v1.types.metadata_service import GetExecutionRequest +from google.cloud.aiplatform_v1.types.metadata_service import GetMetadataSchemaRequest +from google.cloud.aiplatform_v1.types.metadata_service import GetMetadataStoreRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListArtifactsRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListArtifactsResponse +from google.cloud.aiplatform_v1.types.metadata_service import ListContextsRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListContextsResponse +from 
google.cloud.aiplatform_v1.types.metadata_service import ListExecutionsRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListExecutionsResponse +from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataSchemasRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataSchemasResponse +from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataStoresRequest +from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataStoresResponse +from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsMetadata +from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsRequest +from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsResponse +from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsMetadata +from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsRequest +from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsResponse +from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsMetadata +from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsRequest +from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsResponse +from google.cloud.aiplatform_v1.types.metadata_service import QueryArtifactLineageSubgraphRequest +from google.cloud.aiplatform_v1.types.metadata_service import QueryContextLineageSubgraphRequest +from google.cloud.aiplatform_v1.types.metadata_service import QueryExecutionInputsAndOutputsRequest +from google.cloud.aiplatform_v1.types.metadata_service import UpdateArtifactRequest +from google.cloud.aiplatform_v1.types.metadata_service import UpdateContextRequest +from google.cloud.aiplatform_v1.types.metadata_service import UpdateExecutionRequest +from google.cloud.aiplatform_v1.types.metadata_store import MetadataStore +from google.cloud.aiplatform_v1.types.migratable_resource import 
MigratableResource +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesOperationMetadata +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesRequest +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesResponse +from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceRequest +from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceResponse +from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesRequest +from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesResponse +from google.cloud.aiplatform_v1.types.model import Model +from google.cloud.aiplatform_v1.types.model import ModelContainerSpec +from google.cloud.aiplatform_v1.types.model import Port +from google.cloud.aiplatform_v1.types.model import PredictSchemata +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from google.cloud.aiplatform_v1.types.model_evaluation import ModelEvaluation +from google.cloud.aiplatform_v1.types.model_evaluation_slice import ModelEvaluationSlice +from google.cloud.aiplatform_v1.types.model_monitoring import ModelMonitoringAlertConfig +from google.cloud.aiplatform_v1.types.model_monitoring import ModelMonitoringObjectiveConfig +from 
google.cloud.aiplatform_v1.types.model_monitoring import SamplingStrategy +from google.cloud.aiplatform_v1.types.model_monitoring import ThresholdConfig +from google.cloud.aiplatform_v1.types.model_service import DeleteModelRequest +from google.cloud.aiplatform_v1.types.model_service import DeleteModelVersionRequest +from google.cloud.aiplatform_v1.types.model_service import ExportModelOperationMetadata +from google.cloud.aiplatform_v1.types.model_service import ExportModelRequest +from google.cloud.aiplatform_v1.types.model_service import ExportModelResponse +from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationRequest +from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationSliceRequest +from google.cloud.aiplatform_v1.types.model_service import GetModelRequest +from google.cloud.aiplatform_v1.types.model_service import ImportModelEvaluationRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesResponse +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsResponse +from google.cloud.aiplatform_v1.types.model_service import ListModelsRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelsResponse +from google.cloud.aiplatform_v1.types.model_service import ListModelVersionsRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelVersionsResponse +from google.cloud.aiplatform_v1.types.model_service import MergeVersionAliasesRequest +from google.cloud.aiplatform_v1.types.model_service import UpdateModelRequest +from google.cloud.aiplatform_v1.types.model_service import UploadModelOperationMetadata +from google.cloud.aiplatform_v1.types.model_service import UploadModelRequest +from google.cloud.aiplatform_v1.types.model_service import 
UploadModelResponse +from google.cloud.aiplatform_v1.types.operation import DeleteOperationMetadata +from google.cloud.aiplatform_v1.types.operation import GenericOperationMetadata +from google.cloud.aiplatform_v1.types.pipeline_failure_policy import PipelineFailurePolicy +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJob +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJobDetail +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskDetail +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskExecutorDetail +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTemplateMetadata +from google.cloud.aiplatform_v1.types.pipeline_service import CancelPipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CancelTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CreatePipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CreateTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import DeletePipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import DeleteTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import GetPipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import GetTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsResponse +from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesResponse +from google.cloud.aiplatform_v1.types.pipeline_state import PipelineState +from google.cloud.aiplatform_v1.types.prediction_service import ExplainRequest +from google.cloud.aiplatform_v1.types.prediction_service import ExplainResponse +from 
google.cloud.aiplatform_v1.types.prediction_service import PredictRequest +from google.cloud.aiplatform_v1.types.prediction_service import PredictResponse +from google.cloud.aiplatform_v1.types.prediction_service import RawPredictRequest +from google.cloud.aiplatform_v1.types.specialist_pool import SpecialistPool +from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import DeleteSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import GetSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsResponse +from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.study import Measurement +from google.cloud.aiplatform_v1.types.study import Study +from google.cloud.aiplatform_v1.types.study import StudySpec +from google.cloud.aiplatform_v1.types.study import Trial +from google.cloud.aiplatform_v1.types.tensorboard import Tensorboard +from google.cloud.aiplatform_v1.types.tensorboard_data import Scalar +from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardBlob +from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardBlobSequence +from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardTensor +from google.cloud.aiplatform_v1.types.tensorboard_data import TimeSeriesData +from google.cloud.aiplatform_v1.types.tensorboard_data import TimeSeriesDataPoint +from google.cloud.aiplatform_v1.types.tensorboard_experiment import TensorboardExperiment 
+from google.cloud.aiplatform_v1.types.tensorboard_run import TensorboardRun +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardRunsRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardRunsResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardExperimentRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardOperationMetadata +from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardRunRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardExperimentRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardRunRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardExperimentRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardRequest +from 
google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardRunRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardExperimentsRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardExperimentsResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardRunsRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardRunsResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardsRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardsResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardTimeSeriesResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardBlobDataRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardBlobDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardExperimentRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardOperationMetadata +from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardRunRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardExperimentDataRequest +from 
google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardExperimentDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardRunDataRequest +from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardRunDataResponse +from google.cloud.aiplatform_v1.types.tensorboard_time_series import TensorboardTimeSeries +from google.cloud.aiplatform_v1.types.training_pipeline import FilterSplit +from google.cloud.aiplatform_v1.types.training_pipeline import FractionSplit +from google.cloud.aiplatform_v1.types.training_pipeline import InputDataConfig +from google.cloud.aiplatform_v1.types.training_pipeline import PredefinedSplit +from google.cloud.aiplatform_v1.types.training_pipeline import StratifiedSplit +from google.cloud.aiplatform_v1.types.training_pipeline import TimestampSplit +from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline +from google.cloud.aiplatform_v1.types.types import BoolArray +from google.cloud.aiplatform_v1.types.types import DoubleArray +from google.cloud.aiplatform_v1.types.types import Int64Array +from google.cloud.aiplatform_v1.types.types import StringArray +from google.cloud.aiplatform_v1.types.unmanaged_container_model import UnmanagedContainerModel +from google.cloud.aiplatform_v1.types.user_action_reference import UserActionReference +from google.cloud.aiplatform_v1.types.value import Value +from google.cloud.aiplatform_v1.types.vizier_service import AddTrialMeasurementRequest +from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateRequest +from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateResponse +from google.cloud.aiplatform_v1.types.vizier_service import CompleteTrialRequest +from google.cloud.aiplatform_v1.types.vizier_service import CreateStudyRequest +from 
google.cloud.aiplatform_v1.types.vizier_service import CreateTrialRequest +from google.cloud.aiplatform_v1.types.vizier_service import DeleteStudyRequest +from google.cloud.aiplatform_v1.types.vizier_service import DeleteTrialRequest +from google.cloud.aiplatform_v1.types.vizier_service import GetStudyRequest +from google.cloud.aiplatform_v1.types.vizier_service import GetTrialRequest +from google.cloud.aiplatform_v1.types.vizier_service import ListOptimalTrialsRequest +from google.cloud.aiplatform_v1.types.vizier_service import ListOptimalTrialsResponse +from google.cloud.aiplatform_v1.types.vizier_service import ListStudiesRequest +from google.cloud.aiplatform_v1.types.vizier_service import ListStudiesResponse +from google.cloud.aiplatform_v1.types.vizier_service import ListTrialsRequest +from google.cloud.aiplatform_v1.types.vizier_service import ListTrialsResponse +from google.cloud.aiplatform_v1.types.vizier_service import LookupStudyRequest +from google.cloud.aiplatform_v1.types.vizier_service import StopTrialRequest +from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsMetadata +from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsRequest +from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsResponse + +__all__ = ('DatasetServiceClient', + 'DatasetServiceAsyncClient', + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceClient', + 'IndexServiceAsyncClient', + 'JobServiceClient', + 'JobServiceAsyncClient', + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', + 'ModelServiceClient', + 'ModelServiceAsyncClient', + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', + 
'PredictionServiceClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceClient', + 'VizierServiceAsyncClient', + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'PredictRequestResponseLoggingConfig', + 'PrivateEndpoints', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'BlurBaselineConfig', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + 'Explanation', + 'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 
'IntegratedGradientsAttribution', + 'ModelExplanation', + 'Neighbor', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', 
+ 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'MutateDeployedIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 
'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'NfsMount', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteArtifactRequest', + 'DeleteContextRequest', + 'DeleteExecutionRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'PurgeArtifactsMetadata', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeContextsMetadata', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeExecutionsMetadata', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 
'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ImportModelEvaluationRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 'ListModelVersionsResponse', + 'MergeVersionAliasesRequest', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineFailurePolicy', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'PipelineTemplateMetadata', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 
'RawPredictRequest', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 
'UpdateTensorboardTimeSeriesRequest', + 'WriteTensorboardExperimentDataRequest', + 'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'StratifiedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UnmanagedContainerModel', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..1de2a42506 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git 
a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py new file mode 100644 index 0000000000..4d6b2e70eb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json new file mode 100644 index 0000000000..0ae909d6ea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed new file mode 100644 index 0000000000..f70e7f605a --- 
/dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py new file mode 100644 index 0000000000..46c9c24a2b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py new file mode 100644 index 0000000000..112cdf2840 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py new file mode 100644 index 0000000000..f82da03787 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py new file mode 100644 index 0000000000..082147facc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py new file mode 100644 index 0000000000..657737bc43 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py new file mode 100644 index 0000000000..d25d27f9a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will by mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. Vertex AI will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py new file mode 100644 index 0000000000..59c5d0c7c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..76cadbf637 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py new file mode 100644 index 0000000000..d59449f59d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..e6eedd7547 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..70af979db9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py new file mode 100644 index 0000000000..0a16e52cea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json new file mode 100644 index 0000000000..edfffb441b --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py new file mode 100644 index 0000000000..20e59dc95c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py new file mode 100644 index 0000000000..b32f820742 
--- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py new file mode 100644 index 0000000000..59382f8a0b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py new file mode 100644 index 0000000000..57d704cc30 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. + + Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..bb763bfb72 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py new file mode 100644 index 0000000000..b9c3e728e6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoClassificationPredictionParams', + }, +) + + +class VideoClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Video Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is + 10,000. + segment_classification (bool): + Set to true to request segment-level + classification. 
Vertex AI returns labels and + their confidence scores for the entire time + segment of the video that user specified in the + input instance. Default value is true + shot_classification (bool): + Set to true to request shot-level + classification. Vertex AI determines the + boundaries for each camera shot in the entire + time segment of the video that user specified in + the input instance. Vertex AI then returns + labels and their confidence scores for each + detected shot, along with the start and end time + of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. + Default value is false + one_sec_interval_classification (bool): + Set to true to request classification for a + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. 
Default value is false + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + segment_classification = proto.Field( + proto.BOOL, + number=3, + ) + shot_classification = proto.Field( + proto.BOOL, + number=4, + ) + one_sec_interval_classification = proto.Field( + proto.BOOL, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..104bd5c8a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoObjectTrackingPredictionParams', + }, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. 
Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + min_bounding_box_size = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..5c91bf742d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py new file mode 100644 index 0000000000..d47edaa75b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json new file mode 100644 index 0000000000..ba1d67a00c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py new file mode 100644 index 0000000000..aa1097a720 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py new file mode 100644 index 0000000000..cf2b61a9b2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -0,0 +1,57 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py new file mode 100644 index 0000000000..7c852ee51e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. 
+ confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + bboxes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=struct_pb2.ListValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py new file mode 100644 index 0000000000..6b1a1eaa88 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageSegmentationPredictionResult', + }, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + + Attributes: + category_mask (str): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnnotationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (str): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the category in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. + """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py new file mode 100644 index 0000000000..f3a904f503 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TabularClassificationPredictionResult', + }, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField( + proto.STRING, + number=1, + ) + scores = proto.RepeatedField( + proto.FLOAT, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py new file mode 100644 index 0000000000..9a10dd4503 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TabularRegressionPredictionResult', + }, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py new file mode 100644 index 0000000000..dec75aa581 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py new file mode 100644 index 0000000000..7be911616e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextSentimentPredictionResult', + }, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Prediction output format for Text Sentiment + + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. 
The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..7b53843b86 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoActionRecognitionPredictionResult', + }, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. 
+ time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py new file mode 100644 index 0000000000..732c5e73b1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. 
Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + type_ = proto.Field( + proto.STRING, + number=3, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..7c76230c2d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'VideoObjectTrackingPredictionResult', + }, +) + + +class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. + frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]): + All of the frames of the video in which a + single object instance has been detected. The + bounding boxes in the frames identify the same + object. + """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. 
the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..7f0d6005fe --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetection +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentation +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationInputs +from 
google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTables +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtraction +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtractionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentiment +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentimentInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognition +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTracking +from 
google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ('AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..1a9d2972a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py new file mode 100644 index 0000000000..0df2b9204e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs 
+from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ( +'AutoMlImageClassification', +'AutoMlImageClassificationInputs', +'AutoMlImageClassificationMetadata', +'AutoMlImageObjectDetection', +'AutoMlImageObjectDetectionInputs', +'AutoMlImageObjectDetectionMetadata', +'AutoMlImageSegmentation', +'AutoMlImageSegmentationInputs', +'AutoMlImageSegmentationMetadata', +'AutoMlTables', +'AutoMlTablesInputs', +'AutoMlTablesMetadata', +'AutoMlTextClassification', +'AutoMlTextClassificationInputs', +'AutoMlTextExtraction', +'AutoMlTextExtractionInputs', +'AutoMlTextSentiment', +'AutoMlTextSentimentInputs', +'AutoMlVideoActionRecognition', +'AutoMlVideoActionRecognitionInputs', +'AutoMlVideoClassification', +'AutoMlVideoClassificationInputs', +'AutoMlVideoObjectTracking', +'AutoMlVideoObjectTrackingInputs', +'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json new file mode 100644 index 0000000000..620ff75f05 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed new file mode 100644 index 0000000000..1a9d2972a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py new file mode 100644 index 0000000000..9f9a7ed6e3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) +from .export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 
'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py new file mode 100644 index 0000000000..8b109c4e19 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + }, +) + + +class AutoMlImageClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata): + The metadata information. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageClassificationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageClassificationMetadata', + ) + + +class AutoMlImageClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType): + + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 8,000 and + 800,000 milli node hours, inclusive. The default value is + 192,000 which represents one day in wall time, considering 8 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, + the training budget must be between 1,000 and 100,000 milli + node hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. 
When false the early + stopping feature is enabled, which means that + AutoML Image Classification might stop training + before the entire training budget has been used. + multi_label (bool): + If false, a single-label (multi-class) Model + will be trained (i.e. assuming that for each + image just up to one annotation may be + applicable). If true, a multi-label Model will + be trained (i.e. assuming that for each image + multiple annotations may be applicable). + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + base_model_id = proto.Field( + proto.STRING, + number=2, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=3, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=4, + ) + multi_label = proto.Field( + proto.BOOL, + number=5, + ) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..0cf840deee --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. 
+ + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata): + The metadata information + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageObjectDetectionInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageObjectDetectionMetadata', + ) + + +class AutoMlImageObjectDetectionInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 20,000 and + 900,000 milli node hours, inclusive. The default value is + 216,000 which represents one day in wall time, considering 9 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the + training budget must be between 1,000 and 100,000 milli node + hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. 
When false the early + stopping feature is enabled, which means that + AutoML Image Object Detection might stop + training before the entire training budget has + been used. + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_LATENCY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + MOBILE_TF_VERSATILE_1 = 4 + MOBILE_TF_HIGH_ACCURACY_1 = 5 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=3, + ) + + +class AutoMlImageObjectDetectionMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..0e38c2c85b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. 
+ metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageSegmentationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageSegmentationMetadata', + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. Or actaul_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. 
+ """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_ACCURACY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + base_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class AutoMlImageSegmentationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py new file mode 100644 index 0000000000..45ac47748f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @@ -0,0 +1,529 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + }, +) + + +class AutoMlTables(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Tables Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTablesInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlTablesMetadata', + ) + + +class AutoMlTablesInputs(proto.Message): + r""" + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. 
+ + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. + "maximize-au-prc" - Maximize the area under + the precision-recall curve. + "maximize-precision-at-recall" - Maximize + precision for a specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. 
+ classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize + root-mean-squared error (RMSE). "minimize-mae" + - Minimize mean-absolute error (MAE). + "minimize-rmsle" - Minimize root-mean-squared + log error (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. + weight_column_name (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. 
+ additional_experiments (Sequence[str]): + Additional experiment flags for the Tables + training pipeline. + """ + + class Transformation(proto.Message): + r""" + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. 
+ repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + time_format = proto.Field( + proto.STRING, + number=2, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=3, + ) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. 
+ - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions. + + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.AutoTransformation', + ) + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericTransformation', + ) + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalTransformation', + ) + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TimestampTransformation', + ) + text = proto.Field( + proto.MESSAGE, + number=5, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextTransformation', + ) + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', + ) + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', + ) + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextArrayTransformation', + ) + + optimization_objective_recall_value = proto.Field( + proto.FLOAT, + number=5, + oneof='additional_optimization_objective_config', + ) + optimization_objective_precision_value = proto.Field( + proto.FLOAT, + number=6, + oneof='additional_optimization_objective_config', + ) + prediction_type = proto.Field( + proto.STRING, + number=1, + ) + target_column = proto.Field( + proto.STRING, + number=2, + ) + transformations = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Transformation, + ) + optimization_objective = proto.Field( + proto.STRING, + 
number=4, + ) + train_budget_milli_node_hours = proto.Field( + proto.INT64, + number=7, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=8, + ) + weight_column_name = proto.Field( + proto.STRING, + number=9, + ) + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=10, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + additional_experiments = proto.RepeatedField( + proto.STRING, + number=11, + ) + + +class AutoMlTablesMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py new file mode 100644 index 0000000000..df4cc66988 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + }, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextClassificationInputs', + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field( + proto.BOOL, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py new file mode 100644 index 0000000000..b5dfe634f9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + }, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextExtractionInputs', + ) + + +class AutoMlTextExtractionInputs(proto.Message): + r""" + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..e903a985eb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + }, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextSentimentInputs', + ) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). + """ + + sentiment_max = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py new file mode 100644 index 0000000000..fd27a3d07e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + }, +) + + +class AutoMlVideoActionRecognition(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video Action + Recognition Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoActionRecognitionInputs', + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py new file mode 100644 index 0000000000..6f824b40d2 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + }, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoClassificationInputs', + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..fed3b81225 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + }, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoObjectTrackingInputs', + ) + + +class AutoMlVideoObjectTrackingInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_CORAL_VERSATILE_1 = 3 + MOBILE_CORAL_LOW_LATENCY_1 = 4 + MOBILE_JETSON_VERSATILE_1 = 5 + MOBILE_JETSON_LOW_LATENCY_1 = 6 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py new file mode 100644 index 0000000000..39534a0596 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'ExportEvaluatedDataItemsConfig', + }, +) + + +class ExportEvaluatedDataItemsConfig(proto.Message): + r"""Configuration for exporting test set predictions to a + BigQuery table. + + Attributes: + destination_bigquery_uri (str): + URI of desired destination BigQuery table. Expected format: + bq://:: + + If not specified, then results are exported to the following + auto-created BigQuery table: + :export_evaluated_examples__.evaluated_examples + override_existing_table (bool): + If true and an export destination is + specified, then the contents of the destination + are overwritten. Otherwise, if the export + destination already exists, then the export + operation fails. + """ + + destination_bigquery_uri = proto.Field( + proto.STRING, + number=1, + ) + override_existing_table = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py new file mode 100644 index 0000000000..8dc85414b9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py @@ -0,0 +1,954 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.dataset_service import DatasetServiceClient +from .services.dataset_service import DatasetServiceAsyncClient +from .services.endpoint_service import EndpointServiceClient +from .services.endpoint_service import EndpointServiceAsyncClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient +from .services.featurestore_service import FeaturestoreServiceClient +from .services.featurestore_service import FeaturestoreServiceAsyncClient +from .services.index_endpoint_service import IndexEndpointServiceClient +from .services.index_endpoint_service import IndexEndpointServiceAsyncClient +from .services.index_service import IndexServiceClient +from .services.index_service import IndexServiceAsyncClient +from .services.job_service import JobServiceClient +from .services.job_service import JobServiceAsyncClient +from .services.metadata_service import MetadataServiceClient +from .services.metadata_service import MetadataServiceAsyncClient +from .services.migration_service import MigrationServiceClient +from .services.migration_service import MigrationServiceAsyncClient +from .services.model_service import ModelServiceClient +from .services.model_service import ModelServiceAsyncClient +from .services.pipeline_service import PipelineServiceClient +from .services.pipeline_service import PipelineServiceAsyncClient +from .services.prediction_service import PredictionServiceClient +from 
.services.prediction_service import PredictionServiceAsyncClient +from .services.specialist_pool_service import SpecialistPoolServiceClient +from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from .services.tensorboard_service import TensorboardServiceClient +from .services.tensorboard_service import TensorboardServiceAsyncClient +from .services.vizier_service import VizierServiceClient +from .services.vizier_service import VizierServiceAsyncClient + +from .types.accelerator_type import AcceleratorType +from .types.annotation import Annotation +from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact +from .types.batch_prediction_job import BatchPredictionJob +from .types.completion_stats import CompletionStats +from .types.context import Context +from .types.custom_job import ContainerSpec +from .types.custom_job import CustomJob +from .types.custom_job import CustomJobSpec +from .types.custom_job import PythonPackageSpec +from .types.custom_job import Scheduling +from .types.custom_job import WorkerPoolSpec +from .types.data_item import DataItem +from .types.data_labeling_job import ActiveLearningConfig +from .types.data_labeling_job import DataLabelingJob +from .types.data_labeling_job import SampleConfig +from .types.data_labeling_job import TrainingConfig +from .types.dataset import Dataset +from .types.dataset import ExportDataConfig +from .types.dataset import ImportDataConfig +from .types.dataset_service import CreateDatasetOperationMetadata +from .types.dataset_service import CreateDatasetRequest +from .types.dataset_service import DeleteDatasetRequest +from .types.dataset_service import ExportDataOperationMetadata +from .types.dataset_service import ExportDataRequest +from .types.dataset_service import ExportDataResponse +from .types.dataset_service import GetAnnotationSpecRequest +from .types.dataset_service import GetDatasetRequest +from .types.dataset_service import ImportDataOperationMetadata 
+from .types.dataset_service import ImportDataRequest +from .types.dataset_service import ImportDataResponse +from .types.dataset_service import ListAnnotationsRequest +from .types.dataset_service import ListAnnotationsResponse +from .types.dataset_service import ListDataItemsRequest +from .types.dataset_service import ListDataItemsResponse +from .types.dataset_service import ListDatasetsRequest +from .types.dataset_service import ListDatasetsResponse +from .types.dataset_service import UpdateDatasetRequest +from .types.deployed_index_ref import DeployedIndexRef +from .types.deployed_model_ref import DeployedModelRef +from .types.encryption_spec import EncryptionSpec +from .types.endpoint import DeployedModel +from .types.endpoint import Endpoint +from .types.endpoint import PredictRequestResponseLoggingConfig +from .types.endpoint import PrivateEndpoints +from .types.endpoint_service import CreateEndpointOperationMetadata +from .types.endpoint_service import CreateEndpointRequest +from .types.endpoint_service import DeleteEndpointRequest +from .types.endpoint_service import DeployModelOperationMetadata +from .types.endpoint_service import DeployModelRequest +from .types.endpoint_service import DeployModelResponse +from .types.endpoint_service import GetEndpointRequest +from .types.endpoint_service import ListEndpointsRequest +from .types.endpoint_service import ListEndpointsResponse +from .types.endpoint_service import UndeployModelOperationMetadata +from .types.endpoint_service import UndeployModelRequest +from .types.endpoint_service import UndeployModelResponse +from .types.endpoint_service import UpdateEndpointRequest +from .types.entity_type import EntityType +from .types.env_var import EnvVar +from .types.event import Event +from .types.execution import Execution +from .types.explanation import Attribution +from .types.explanation import BlurBaselineConfig +from .types.explanation import ExamplesOverride +from .types.explanation import 
ExamplesRestrictionsNamespace +from .types.explanation import Explanation +from .types.explanation import ExplanationMetadataOverride +from .types.explanation import ExplanationParameters +from .types.explanation import ExplanationSpec +from .types.explanation import ExplanationSpecOverride +from .types.explanation import FeatureNoiseSigma +from .types.explanation import IntegratedGradientsAttribution +from .types.explanation import ModelExplanation +from .types.explanation import Neighbor +from .types.explanation import SampledShapleyAttribution +from .types.explanation import SmoothGradConfig +from .types.explanation import XraiAttribution +from .types.explanation_metadata import ExplanationMetadata +from .types.feature import Feature +from .types.feature_monitoring_stats import FeatureStatsAnomaly +from .types.feature_selector import FeatureSelector +from .types.feature_selector import IdMatcher +from .types.featurestore import Featurestore +from .types.featurestore_monitoring import FeaturestoreMonitoringConfig +from .types.featurestore_online_service import FeatureValue +from .types.featurestore_online_service import FeatureValueList +from .types.featurestore_online_service import ReadFeatureValuesRequest +from .types.featurestore_online_service import ReadFeatureValuesResponse +from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_service import BatchCreateFeaturesOperationMetadata +from .types.featurestore_service import BatchCreateFeaturesRequest +from .types.featurestore_service import BatchCreateFeaturesResponse +from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from .types.featurestore_service import BatchReadFeatureValuesRequest +from .types.featurestore_service import BatchReadFeatureValuesResponse +from .types.featurestore_service import CreateEntityTypeOperationMetadata +from .types.featurestore_service import CreateEntityTypeRequest +from .types.featurestore_service 
import CreateFeatureOperationMetadata +from .types.featurestore_service import CreateFeatureRequest +from .types.featurestore_service import CreateFeaturestoreOperationMetadata +from .types.featurestore_service import CreateFeaturestoreRequest +from .types.featurestore_service import DeleteEntityTypeRequest +from .types.featurestore_service import DeleteFeatureRequest +from .types.featurestore_service import DeleteFeaturestoreRequest +from .types.featurestore_service import DestinationFeatureSetting +from .types.featurestore_service import ExportFeatureValuesOperationMetadata +from .types.featurestore_service import ExportFeatureValuesRequest +from .types.featurestore_service import ExportFeatureValuesResponse +from .types.featurestore_service import FeatureValueDestination +from .types.featurestore_service import GetEntityTypeRequest +from .types.featurestore_service import GetFeatureRequest +from .types.featurestore_service import GetFeaturestoreRequest +from .types.featurestore_service import ImportFeatureValuesOperationMetadata +from .types.featurestore_service import ImportFeatureValuesRequest +from .types.featurestore_service import ImportFeatureValuesResponse +from .types.featurestore_service import ListEntityTypesRequest +from .types.featurestore_service import ListEntityTypesResponse +from .types.featurestore_service import ListFeaturesRequest +from .types.featurestore_service import ListFeaturesResponse +from .types.featurestore_service import ListFeaturestoresRequest +from .types.featurestore_service import ListFeaturestoresResponse +from .types.featurestore_service import SearchFeaturesRequest +from .types.featurestore_service import SearchFeaturesResponse +from .types.featurestore_service import UpdateEntityTypeRequest +from .types.featurestore_service import UpdateFeatureRequest +from .types.featurestore_service import UpdateFeaturestoreOperationMetadata +from .types.featurestore_service import UpdateFeaturestoreRequest +from 
.types.hyperparameter_tuning_job import HyperparameterTuningJob +from .types.index import Index +from .types.index_endpoint import DeployedIndex +from .types.index_endpoint import DeployedIndexAuthConfig +from .types.index_endpoint import IndexEndpoint +from .types.index_endpoint import IndexPrivateEndpoints +from .types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from .types.index_endpoint_service import CreateIndexEndpointRequest +from .types.index_endpoint_service import DeleteIndexEndpointRequest +from .types.index_endpoint_service import DeployIndexOperationMetadata +from .types.index_endpoint_service import DeployIndexRequest +from .types.index_endpoint_service import DeployIndexResponse +from .types.index_endpoint_service import GetIndexEndpointRequest +from .types.index_endpoint_service import ListIndexEndpointsRequest +from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from .types.index_endpoint_service import MutateDeployedIndexRequest +from .types.index_endpoint_service import MutateDeployedIndexResponse +from .types.index_endpoint_service import UndeployIndexOperationMetadata +from .types.index_endpoint_service import UndeployIndexRequest +from .types.index_endpoint_service import UndeployIndexResponse +from .types.index_endpoint_service import UpdateIndexEndpointRequest +from .types.index_service import CreateIndexOperationMetadata +from .types.index_service import CreateIndexRequest +from .types.index_service import DeleteIndexRequest +from .types.index_service import GetIndexRequest +from .types.index_service import ListIndexesRequest +from .types.index_service import ListIndexesResponse +from .types.index_service import NearestNeighborSearchOperationMetadata +from .types.index_service import UpdateIndexOperationMetadata +from .types.index_service import UpdateIndexRequest +from .types.io import AvroSource +from .types.io import 
BigQueryDestination +from .types.io import BigQuerySource +from .types.io import ContainerRegistryDestination +from .types.io import CsvDestination +from .types.io import CsvSource +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import TFRecordDestination +from .types.job_service import CancelBatchPredictionJobRequest +from .types.job_service import CancelCustomJobRequest +from .types.job_service import CancelDataLabelingJobRequest +from .types.job_service import CancelHyperparameterTuningJobRequest +from .types.job_service import CreateBatchPredictionJobRequest +from .types.job_service import CreateCustomJobRequest +from .types.job_service import CreateDataLabelingJobRequest +from .types.job_service import CreateHyperparameterTuningJobRequest +from .types.job_service import CreateModelDeploymentMonitoringJobRequest +from .types.job_service import DeleteBatchPredictionJobRequest +from .types.job_service import DeleteCustomJobRequest +from .types.job_service import DeleteDataLabelingJobRequest +from .types.job_service import DeleteHyperparameterTuningJobRequest +from .types.job_service import DeleteModelDeploymentMonitoringJobRequest +from .types.job_service import GetBatchPredictionJobRequest +from .types.job_service import GetCustomJobRequest +from .types.job_service import GetDataLabelingJobRequest +from .types.job_service import GetHyperparameterTuningJobRequest +from .types.job_service import GetModelDeploymentMonitoringJobRequest +from .types.job_service import ListBatchPredictionJobsRequest +from .types.job_service import ListBatchPredictionJobsResponse +from .types.job_service import ListCustomJobsRequest +from .types.job_service import ListCustomJobsResponse +from .types.job_service import ListDataLabelingJobsRequest +from .types.job_service import ListDataLabelingJobsResponse +from .types.job_service import ListHyperparameterTuningJobsRequest +from .types.job_service import ListHyperparameterTuningJobsResponse +from 
.types.job_service import ListModelDeploymentMonitoringJobsRequest +from .types.job_service import ListModelDeploymentMonitoringJobsResponse +from .types.job_service import PauseModelDeploymentMonitoringJobRequest +from .types.job_service import ResumeModelDeploymentMonitoringJobRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from .types.job_service import UpdateModelDeploymentMonitoringJobRequest +from .types.job_state import JobState +from .types.lineage_subgraph import LineageSubgraph +from .types.machine_resources import AutomaticResources +from .types.machine_resources import AutoscalingMetricSpec +from .types.machine_resources import BatchDedicatedResources +from .types.machine_resources import DedicatedResources +from .types.machine_resources import DiskSpec +from .types.machine_resources import MachineSpec +from .types.machine_resources import NfsMount +from .types.machine_resources import ResourcesConsumed +from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.metadata_schema import MetadataSchema +from .types.metadata_service import AddContextArtifactsAndExecutionsRequest +from .types.metadata_service import AddContextArtifactsAndExecutionsResponse +from .types.metadata_service import AddContextChildrenRequest +from .types.metadata_service import AddContextChildrenResponse +from .types.metadata_service import AddExecutionEventsRequest +from .types.metadata_service import AddExecutionEventsResponse +from .types.metadata_service import CreateArtifactRequest +from .types.metadata_service import CreateContextRequest +from .types.metadata_service import CreateExecutionRequest +from .types.metadata_service import CreateMetadataSchemaRequest +from .types.metadata_service import CreateMetadataStoreOperationMetadata 
+from .types.metadata_service import CreateMetadataStoreRequest +from .types.metadata_service import DeleteArtifactRequest +from .types.metadata_service import DeleteContextRequest +from .types.metadata_service import DeleteExecutionRequest +from .types.metadata_service import DeleteMetadataStoreOperationMetadata +from .types.metadata_service import DeleteMetadataStoreRequest +from .types.metadata_service import GetArtifactRequest +from .types.metadata_service import GetContextRequest +from .types.metadata_service import GetExecutionRequest +from .types.metadata_service import GetMetadataSchemaRequest +from .types.metadata_service import GetMetadataStoreRequest +from .types.metadata_service import ListArtifactsRequest +from .types.metadata_service import ListArtifactsResponse +from .types.metadata_service import ListContextsRequest +from .types.metadata_service import ListContextsResponse +from .types.metadata_service import ListExecutionsRequest +from .types.metadata_service import ListExecutionsResponse +from .types.metadata_service import ListMetadataSchemasRequest +from .types.metadata_service import ListMetadataSchemasResponse +from .types.metadata_service import ListMetadataStoresRequest +from .types.metadata_service import ListMetadataStoresResponse +from .types.metadata_service import PurgeArtifactsMetadata +from .types.metadata_service import PurgeArtifactsRequest +from .types.metadata_service import PurgeArtifactsResponse +from .types.metadata_service import PurgeContextsMetadata +from .types.metadata_service import PurgeContextsRequest +from .types.metadata_service import PurgeContextsResponse +from .types.metadata_service import PurgeExecutionsMetadata +from .types.metadata_service import PurgeExecutionsRequest +from .types.metadata_service import PurgeExecutionsResponse +from .types.metadata_service import QueryArtifactLineageSubgraphRequest +from .types.metadata_service import QueryContextLineageSubgraphRequest +from .types.metadata_service import 
QueryExecutionInputsAndOutputsRequest +from .types.metadata_service import UpdateArtifactRequest +from .types.metadata_service import UpdateContextRequest +from .types.metadata_service import UpdateExecutionRequest +from .types.metadata_store import MetadataStore +from .types.migratable_resource import MigratableResource +from .types.migration_service import BatchMigrateResourcesOperationMetadata +from .types.migration_service import BatchMigrateResourcesRequest +from .types.migration_service import BatchMigrateResourcesResponse +from .types.migration_service import MigrateResourceRequest +from .types.migration_service import MigrateResourceResponse +from .types.migration_service import SearchMigratableResourcesRequest +from .types.migration_service import SearchMigratableResourcesResponse +from .types.model import Model +from .types.model import ModelContainerSpec +from .types.model import Port +from .types.model import PredictSchemata +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from .types.model_evaluation import ModelEvaluation +from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_monitoring import ModelMonitoringAlertConfig +from .types.model_monitoring import ModelMonitoringObjectiveConfig +from .types.model_monitoring import SamplingStrategy +from .types.model_monitoring import ThresholdConfig +from .types.model_service import DeleteModelRequest +from .types.model_service import DeleteModelVersionRequest +from .types.model_service import 
ExportModelOperationMetadata +from .types.model_service import ExportModelRequest +from .types.model_service import ExportModelResponse +from .types.model_service import GetModelEvaluationRequest +from .types.model_service import GetModelEvaluationSliceRequest +from .types.model_service import GetModelRequest +from .types.model_service import ImportModelEvaluationRequest +from .types.model_service import ListModelEvaluationSlicesRequest +from .types.model_service import ListModelEvaluationSlicesResponse +from .types.model_service import ListModelEvaluationsRequest +from .types.model_service import ListModelEvaluationsResponse +from .types.model_service import ListModelsRequest +from .types.model_service import ListModelsResponse +from .types.model_service import ListModelVersionsRequest +from .types.model_service import ListModelVersionsResponse +from .types.model_service import MergeVersionAliasesRequest +from .types.model_service import UpdateModelRequest +from .types.model_service import UploadModelOperationMetadata +from .types.model_service import UploadModelRequest +from .types.model_service import UploadModelResponse +from .types.operation import DeleteOperationMetadata +from .types.operation import GenericOperationMetadata +from .types.pipeline_failure_policy import PipelineFailurePolicy +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_job import PipelineTemplateMetadata +from .types.pipeline_service import CancelPipelineJobRequest +from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest +from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest +from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service 
import GetPipelineJobRequest +from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse +from .types.pipeline_service import ListTrainingPipelinesRequest +from .types.pipeline_service import ListTrainingPipelinesResponse +from .types.pipeline_state import PipelineState +from .types.prediction_service import ExplainRequest +from .types.prediction_service import ExplainResponse +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.prediction_service import RawPredictRequest +from .types.specialist_pool import SpecialistPool +from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import CreateSpecialistPoolRequest +from .types.specialist_pool_service import DeleteSpecialistPoolRequest +from .types.specialist_pool_service import GetSpecialistPoolRequest +from .types.specialist_pool_service import ListSpecialistPoolsRequest +from .types.specialist_pool_service import ListSpecialistPoolsResponse +from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import UpdateSpecialistPoolRequest +from .types.study import Measurement +from .types.study import Study +from .types.study import StudySpec +from .types.study import Trial +from .types.tensorboard import Tensorboard +from .types.tensorboard_data import Scalar +from .types.tensorboard_data import TensorboardBlob +from .types.tensorboard_data import TensorboardBlobSequence +from .types.tensorboard_data import TensorboardTensor +from .types.tensorboard_data import TimeSeriesData +from .types.tensorboard_data import TimeSeriesDataPoint +from .types.tensorboard_experiment import TensorboardExperiment +from .types.tensorboard_run import TensorboardRun +from .types.tensorboard_service import 
BatchCreateTensorboardRunsRequest +from .types.tensorboard_service import BatchCreateTensorboardRunsResponse +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import CreateTensorboardExperimentRequest +from .types.tensorboard_service import CreateTensorboardOperationMetadata +from .types.tensorboard_service import CreateTensorboardRequest +from .types.tensorboard_service import CreateTensorboardRunRequest +from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import DeleteTensorboardExperimentRequest +from .types.tensorboard_service import DeleteTensorboardRequest +from .types.tensorboard_service import DeleteTensorboardRunRequest +from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import GetTensorboardExperimentRequest +from .types.tensorboard_service import GetTensorboardRequest +from .types.tensorboard_service import GetTensorboardRunRequest +from .types.tensorboard_service import GetTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardExperimentsRequest +from .types.tensorboard_service import ListTensorboardExperimentsResponse +from .types.tensorboard_service import ListTensorboardRunsRequest +from .types.tensorboard_service import ListTensorboardRunsResponse +from .types.tensorboard_service import ListTensorboardsRequest +from .types.tensorboard_service import ListTensorboardsResponse +from .types.tensorboard_service import ListTensorboardTimeSeriesRequest +from 
.types.tensorboard_service import ListTensorboardTimeSeriesResponse +from .types.tensorboard_service import ReadTensorboardBlobDataRequest +from .types.tensorboard_service import ReadTensorboardBlobDataResponse +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import UpdateTensorboardExperimentRequest +from .types.tensorboard_service import UpdateTensorboardOperationMetadata +from .types.tensorboard_service import UpdateTensorboardRequest +from .types.tensorboard_service import UpdateTensorboardRunRequest +from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataResponse +from .types.tensorboard_service import WriteTensorboardRunDataRequest +from .types.tensorboard_service import WriteTensorboardRunDataResponse +from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.training_pipeline import FilterSplit +from .types.training_pipeline import FractionSplit +from .types.training_pipeline import InputDataConfig +from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import StratifiedSplit +from .types.training_pipeline import TimestampSplit +from .types.training_pipeline import TrainingPipeline +from .types.types import BoolArray +from .types.types import DoubleArray +from .types.types import Int64Array +from .types.types import StringArray +from .types.unmanaged_container_model import UnmanagedContainerModel +from .types.user_action_reference import UserActionReference +from .types.value import Value +from .types.vizier_service import AddTrialMeasurementRequest +from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from .types.vizier_service import CheckTrialEarlyStoppingStateRequest +from 
.types.vizier_service import CheckTrialEarlyStoppingStateResponse +from .types.vizier_service import CompleteTrialRequest +from .types.vizier_service import CreateStudyRequest +from .types.vizier_service import CreateTrialRequest +from .types.vizier_service import DeleteStudyRequest +from .types.vizier_service import DeleteTrialRequest +from .types.vizier_service import GetStudyRequest +from .types.vizier_service import GetTrialRequest +from .types.vizier_service import ListOptimalTrialsRequest +from .types.vizier_service import ListOptimalTrialsResponse +from .types.vizier_service import ListStudiesRequest +from .types.vizier_service import ListStudiesResponse +from .types.vizier_service import ListTrialsRequest +from .types.vizier_service import ListTrialsResponse +from .types.vizier_service import LookupStudyRequest +from .types.vizier_service import StopTrialRequest +from .types.vizier_service import SuggestTrialsMetadata +from .types.vizier_service import SuggestTrialsRequest +from .types.vizier_service import SuggestTrialsResponse + +__all__ = ( + 'DatasetServiceAsyncClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceAsyncClient', + 'JobServiceAsyncClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceAsyncClient', + 'ModelServiceAsyncClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceAsyncClient', +'AcceleratorType', +'ActiveLearningConfig', +'AddContextArtifactsAndExecutionsRequest', +'AddContextArtifactsAndExecutionsResponse', +'AddContextChildrenRequest', +'AddContextChildrenResponse', +'AddExecutionEventsRequest', +'AddExecutionEventsResponse', +'AddTrialMeasurementRequest', +'Annotation', +'AnnotationSpec', +'Artifact', +'Attribution', +'AutomaticResources', +'AutoscalingMetricSpec', +'AvroSource', 
+'BatchCreateFeaturesOperationMetadata', +'BatchCreateFeaturesRequest', +'BatchCreateFeaturesResponse', +'BatchCreateTensorboardRunsRequest', +'BatchCreateTensorboardRunsResponse', +'BatchCreateTensorboardTimeSeriesRequest', +'BatchCreateTensorboardTimeSeriesResponse', +'BatchDedicatedResources', +'BatchMigrateResourcesOperationMetadata', +'BatchMigrateResourcesRequest', +'BatchMigrateResourcesResponse', +'BatchPredictionJob', +'BatchReadFeatureValuesOperationMetadata', +'BatchReadFeatureValuesRequest', +'BatchReadFeatureValuesResponse', +'BatchReadTensorboardTimeSeriesDataRequest', +'BatchReadTensorboardTimeSeriesDataResponse', +'BigQueryDestination', +'BigQuerySource', +'BlurBaselineConfig', +'BoolArray', +'CancelBatchPredictionJobRequest', +'CancelCustomJobRequest', +'CancelDataLabelingJobRequest', +'CancelHyperparameterTuningJobRequest', +'CancelPipelineJobRequest', +'CancelTrainingPipelineRequest', +'CheckTrialEarlyStoppingStateMetatdata', +'CheckTrialEarlyStoppingStateRequest', +'CheckTrialEarlyStoppingStateResponse', +'CompleteTrialRequest', +'CompletionStats', +'ContainerRegistryDestination', +'ContainerSpec', +'Context', +'CreateArtifactRequest', +'CreateBatchPredictionJobRequest', +'CreateContextRequest', +'CreateCustomJobRequest', +'CreateDataLabelingJobRequest', +'CreateDatasetOperationMetadata', +'CreateDatasetRequest', +'CreateEndpointOperationMetadata', +'CreateEndpointRequest', +'CreateEntityTypeOperationMetadata', +'CreateEntityTypeRequest', +'CreateExecutionRequest', +'CreateFeatureOperationMetadata', +'CreateFeatureRequest', +'CreateFeaturestoreOperationMetadata', +'CreateFeaturestoreRequest', +'CreateHyperparameterTuningJobRequest', +'CreateIndexEndpointOperationMetadata', +'CreateIndexEndpointRequest', +'CreateIndexOperationMetadata', +'CreateIndexRequest', +'CreateMetadataSchemaRequest', +'CreateMetadataStoreOperationMetadata', +'CreateMetadataStoreRequest', +'CreateModelDeploymentMonitoringJobRequest', +'CreatePipelineJobRequest', 
+'CreateSpecialistPoolOperationMetadata', +'CreateSpecialistPoolRequest', +'CreateStudyRequest', +'CreateTensorboardExperimentRequest', +'CreateTensorboardOperationMetadata', +'CreateTensorboardRequest', +'CreateTensorboardRunRequest', +'CreateTensorboardTimeSeriesRequest', +'CreateTrainingPipelineRequest', +'CreateTrialRequest', +'CsvDestination', +'CsvSource', +'CustomJob', +'CustomJobSpec', +'DataItem', +'DataLabelingJob', +'Dataset', +'DatasetServiceClient', +'DedicatedResources', +'DeleteArtifactRequest', +'DeleteBatchPredictionJobRequest', +'DeleteContextRequest', +'DeleteCustomJobRequest', +'DeleteDataLabelingJobRequest', +'DeleteDatasetRequest', +'DeleteEndpointRequest', +'DeleteEntityTypeRequest', +'DeleteExecutionRequest', +'DeleteFeatureRequest', +'DeleteFeaturestoreRequest', +'DeleteHyperparameterTuningJobRequest', +'DeleteIndexEndpointRequest', +'DeleteIndexRequest', +'DeleteMetadataStoreOperationMetadata', +'DeleteMetadataStoreRequest', +'DeleteModelDeploymentMonitoringJobRequest', +'DeleteModelRequest', +'DeleteModelVersionRequest', +'DeleteOperationMetadata', +'DeletePipelineJobRequest', +'DeleteSpecialistPoolRequest', +'DeleteStudyRequest', +'DeleteTensorboardExperimentRequest', +'DeleteTensorboardRequest', +'DeleteTensorboardRunRequest', +'DeleteTensorboardTimeSeriesRequest', +'DeleteTrainingPipelineRequest', +'DeleteTrialRequest', +'DeployIndexOperationMetadata', +'DeployIndexRequest', +'DeployIndexResponse', +'DeployModelOperationMetadata', +'DeployModelRequest', +'DeployModelResponse', +'DeployedIndex', +'DeployedIndexAuthConfig', +'DeployedIndexRef', +'DeployedModel', +'DeployedModelRef', +'DestinationFeatureSetting', +'DiskSpec', +'DoubleArray', +'EncryptionSpec', +'Endpoint', +'EndpointServiceClient', +'EntityType', +'EnvVar', +'Event', +'ExamplesOverride', +'ExamplesRestrictionsNamespace', +'Execution', +'ExplainRequest', +'ExplainResponse', +'Explanation', +'ExplanationMetadata', +'ExplanationMetadataOverride', +'ExplanationParameters', 
+'ExplanationSpec', +'ExplanationSpecOverride', +'ExportDataConfig', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportDataResponse', +'ExportFeatureValuesOperationMetadata', +'ExportFeatureValuesRequest', +'ExportFeatureValuesResponse', +'ExportModelOperationMetadata', +'ExportModelRequest', +'ExportModelResponse', +'ExportTensorboardTimeSeriesDataRequest', +'ExportTensorboardTimeSeriesDataResponse', +'Feature', +'FeatureNoiseSigma', +'FeatureSelector', +'FeatureStatsAnomaly', +'FeatureValue', +'FeatureValueDestination', +'FeatureValueList', +'Featurestore', +'FeaturestoreMonitoringConfig', +'FeaturestoreOnlineServingServiceClient', +'FeaturestoreServiceClient', +'FilterSplit', +'FractionSplit', +'GcsDestination', +'GcsSource', +'GenericOperationMetadata', +'GetAnnotationSpecRequest', +'GetArtifactRequest', +'GetBatchPredictionJobRequest', +'GetContextRequest', +'GetCustomJobRequest', +'GetDataLabelingJobRequest', +'GetDatasetRequest', +'GetEndpointRequest', +'GetEntityTypeRequest', +'GetExecutionRequest', +'GetFeatureRequest', +'GetFeaturestoreRequest', +'GetHyperparameterTuningJobRequest', +'GetIndexEndpointRequest', +'GetIndexRequest', +'GetMetadataSchemaRequest', +'GetMetadataStoreRequest', +'GetModelDeploymentMonitoringJobRequest', +'GetModelEvaluationRequest', +'GetModelEvaluationSliceRequest', +'GetModelRequest', +'GetPipelineJobRequest', +'GetSpecialistPoolRequest', +'GetStudyRequest', +'GetTensorboardExperimentRequest', +'GetTensorboardRequest', +'GetTensorboardRunRequest', +'GetTensorboardTimeSeriesRequest', +'GetTrainingPipelineRequest', +'GetTrialRequest', +'HyperparameterTuningJob', +'IdMatcher', +'ImportDataConfig', +'ImportDataOperationMetadata', +'ImportDataRequest', +'ImportDataResponse', +'ImportFeatureValuesOperationMetadata', +'ImportFeatureValuesRequest', +'ImportFeatureValuesResponse', +'ImportModelEvaluationRequest', +'Index', +'IndexEndpoint', +'IndexEndpointServiceClient', +'IndexPrivateEndpoints', +'IndexServiceClient', 
+'InputDataConfig', +'Int64Array', +'IntegratedGradientsAttribution', +'JobServiceClient', +'JobState', +'LineageSubgraph', +'ListAnnotationsRequest', +'ListAnnotationsResponse', +'ListArtifactsRequest', +'ListArtifactsResponse', +'ListBatchPredictionJobsRequest', +'ListBatchPredictionJobsResponse', +'ListContextsRequest', +'ListContextsResponse', +'ListCustomJobsRequest', +'ListCustomJobsResponse', +'ListDataItemsRequest', +'ListDataItemsResponse', +'ListDataLabelingJobsRequest', +'ListDataLabelingJobsResponse', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListEndpointsRequest', +'ListEndpointsResponse', +'ListEntityTypesRequest', +'ListEntityTypesResponse', +'ListExecutionsRequest', +'ListExecutionsResponse', +'ListFeaturesRequest', +'ListFeaturesResponse', +'ListFeaturestoresRequest', +'ListFeaturestoresResponse', +'ListHyperparameterTuningJobsRequest', +'ListHyperparameterTuningJobsResponse', +'ListIndexEndpointsRequest', +'ListIndexEndpointsResponse', +'ListIndexesRequest', +'ListIndexesResponse', +'ListMetadataSchemasRequest', +'ListMetadataSchemasResponse', +'ListMetadataStoresRequest', +'ListMetadataStoresResponse', +'ListModelDeploymentMonitoringJobsRequest', +'ListModelDeploymentMonitoringJobsResponse', +'ListModelEvaluationSlicesRequest', +'ListModelEvaluationSlicesResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', +'ListModelVersionsRequest', +'ListModelVersionsResponse', +'ListModelsRequest', +'ListModelsResponse', +'ListOptimalTrialsRequest', +'ListOptimalTrialsResponse', +'ListPipelineJobsRequest', +'ListPipelineJobsResponse', +'ListSpecialistPoolsRequest', +'ListSpecialistPoolsResponse', +'ListStudiesRequest', +'ListStudiesResponse', +'ListTensorboardExperimentsRequest', +'ListTensorboardExperimentsResponse', +'ListTensorboardRunsRequest', +'ListTensorboardRunsResponse', +'ListTensorboardTimeSeriesRequest', +'ListTensorboardTimeSeriesResponse', +'ListTensorboardsRequest', +'ListTensorboardsResponse', 
+'ListTrainingPipelinesRequest', +'ListTrainingPipelinesResponse', +'ListTrialsRequest', +'ListTrialsResponse', +'LookupStudyRequest', +'MachineSpec', +'ManualBatchTuningParameters', +'Measurement', +'MergeVersionAliasesRequest', +'MetadataSchema', +'MetadataServiceClient', +'MetadataStore', +'MigratableResource', +'MigrateResourceRequest', +'MigrateResourceResponse', +'MigrationServiceClient', +'Model', +'ModelContainerSpec', +'ModelDeploymentMonitoringBigQueryTable', +'ModelDeploymentMonitoringJob', +'ModelDeploymentMonitoringObjectiveConfig', +'ModelDeploymentMonitoringObjectiveType', +'ModelDeploymentMonitoringScheduleConfig', +'ModelEvaluation', +'ModelEvaluationSlice', +'ModelExplanation', +'ModelMonitoringAlertConfig', +'ModelMonitoringObjectiveConfig', +'ModelMonitoringStatsAnomalies', +'ModelServiceClient', +'MutateDeployedIndexOperationMetadata', +'MutateDeployedIndexRequest', +'MutateDeployedIndexResponse', +'NearestNeighborSearchOperationMetadata', +'Neighbor', +'NfsMount', +'PauseModelDeploymentMonitoringJobRequest', +'PipelineFailurePolicy', +'PipelineJob', +'PipelineJobDetail', +'PipelineServiceClient', +'PipelineState', +'PipelineTaskDetail', +'PipelineTaskExecutorDetail', +'PipelineTemplateMetadata', +'Port', +'PredefinedSplit', +'PredictRequest', +'PredictRequestResponseLoggingConfig', +'PredictResponse', +'PredictSchemata', +'PredictionServiceClient', +'PrivateEndpoints', +'PurgeArtifactsMetadata', +'PurgeArtifactsRequest', +'PurgeArtifactsResponse', +'PurgeContextsMetadata', +'PurgeContextsRequest', +'PurgeContextsResponse', +'PurgeExecutionsMetadata', +'PurgeExecutionsRequest', +'PurgeExecutionsResponse', +'PythonPackageSpec', +'QueryArtifactLineageSubgraphRequest', +'QueryContextLineageSubgraphRequest', +'QueryExecutionInputsAndOutputsRequest', +'RawPredictRequest', +'ReadFeatureValuesRequest', +'ReadFeatureValuesResponse', +'ReadTensorboardBlobDataRequest', +'ReadTensorboardBlobDataResponse', +'ReadTensorboardTimeSeriesDataRequest', 
+'ReadTensorboardTimeSeriesDataResponse', +'ResourcesConsumed', +'ResumeModelDeploymentMonitoringJobRequest', +'SampleConfig', +'SampledShapleyAttribution', +'SamplingStrategy', +'Scalar', +'Scheduling', +'SearchFeaturesRequest', +'SearchFeaturesResponse', +'SearchMigratableResourcesRequest', +'SearchMigratableResourcesResponse', +'SearchModelDeploymentMonitoringStatsAnomaliesRequest', +'SearchModelDeploymentMonitoringStatsAnomaliesResponse', +'SmoothGradConfig', +'SpecialistPool', +'SpecialistPoolServiceClient', +'StopTrialRequest', +'StratifiedSplit', +'StreamingReadFeatureValuesRequest', +'StringArray', +'Study', +'StudySpec', +'SuggestTrialsMetadata', +'SuggestTrialsRequest', +'SuggestTrialsResponse', +'TFRecordDestination', +'Tensorboard', +'TensorboardBlob', +'TensorboardBlobSequence', +'TensorboardExperiment', +'TensorboardRun', +'TensorboardServiceClient', +'TensorboardTensor', +'TensorboardTimeSeries', +'ThresholdConfig', +'TimeSeriesData', +'TimeSeriesDataPoint', +'TimestampSplit', +'TrainingConfig', +'TrainingPipeline', +'Trial', +'UndeployIndexOperationMetadata', +'UndeployIndexRequest', +'UndeployIndexResponse', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UndeployModelResponse', +'UnmanagedContainerModel', +'UpdateArtifactRequest', +'UpdateContextRequest', +'UpdateDatasetRequest', +'UpdateEndpointRequest', +'UpdateEntityTypeRequest', +'UpdateExecutionRequest', +'UpdateFeatureRequest', +'UpdateFeaturestoreOperationMetadata', +'UpdateFeaturestoreRequest', +'UpdateIndexEndpointRequest', +'UpdateIndexOperationMetadata', +'UpdateIndexRequest', +'UpdateModelDeploymentMonitoringJobOperationMetadata', +'UpdateModelDeploymentMonitoringJobRequest', +'UpdateModelRequest', +'UpdateSpecialistPoolOperationMetadata', +'UpdateSpecialistPoolRequest', +'UpdateTensorboardExperimentRequest', +'UpdateTensorboardOperationMetadata', +'UpdateTensorboardRequest', +'UpdateTensorboardRunRequest', +'UpdateTensorboardTimeSeriesRequest', 
+'UploadModelOperationMetadata', +'UploadModelRequest', +'UploadModelResponse', +'UserActionReference', +'Value', +'VizierServiceClient', +'WorkerPoolSpec', +'WriteTensorboardExperimentDataRequest', +'WriteTensorboardExperimentDataResponse', +'WriteTensorboardRunDataRequest', +'WriteTensorboardRunDataResponse', +'XraiAttribution', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json new file mode 100644 index 0000000000..35cf881fe2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json @@ -0,0 +1,2099 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform_v1", + "protoPackage": "google.cloud.aiplatform.v1", + "schema": "1.0", + "services": { + "DatasetService": { + "clients": { + "grpc": { + "libraryClient": "DatasetServiceClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DatasetServiceAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + 
"methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + } + } + }, + "EndpointService": { + "clients": { + "grpc": { + "libraryClient": "EndpointServiceClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "EndpointServiceAsyncClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + } + } + }, + "FeaturestoreOnlineServingService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreOnlineServingServiceClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + 
"methods": [ + "streaming_read_feature_values" + ] + } + } + } + } + }, + "FeaturestoreService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreServiceClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreServiceAsyncClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": 
{ + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + } + } + }, + "IndexEndpointService": { + "clients": { + "grpc": { + "libraryClient": "IndexEndpointServiceClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + "methods": [ + "delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "IndexEndpointServiceAsyncClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + 
"methods": [ + "delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + } + } + }, + "IndexService": { + "clients": { + "grpc": { + "libraryClient": "IndexServiceClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + }, + "grpc-async": { + "libraryClient": "IndexServiceAsyncClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + } + } + }, + "JobService": { + "clients": { + "grpc": { + "libraryClient": "JobServiceClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ 
+ "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + "ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + "resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + "UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobServiceAsyncClient", + "rpcs": { + 
"CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + 
"ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + "resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + "UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + } + } + }, + "MetadataService": { + "clients": { + "grpc": { + "libraryClient": "MetadataServiceClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + 
"list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MetadataServiceAsyncClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + 
"get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + } + } + }, + "MigrationService": { + "clients": { + "grpc": { + "libraryClient": "MigrationServiceClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MigrationServiceAsyncClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + } + } + }, + "ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + 
"DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + 
} + } + } + }, + "PipelineService": { + "clients": { + "grpc": { + "libraryClient": "PipelineServiceClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PipelineServiceAsyncClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + }, + "RawPredict": { + "methods": [ + 
"raw_predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + }, + "RawPredict": { + "methods": [ + "raw_predict" + ] + } + } + } + } + }, + "SpecialistPoolService": { + "clients": { + "grpc": { + "libraryClient": "SpecialistPoolServiceClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SpecialistPoolServiceAsyncClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + } + } + }, + "TensorboardService": { + "clients": { + "grpc": { + "libraryClient": "TensorboardServiceClient", + "rpcs": { + "BatchCreateTensorboardRuns": { + "methods": [ + "batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + "batch_read_tensorboard_time_series_data" + ] + }, + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": 
[ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + }, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + "update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + "UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + "WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + }, + "grpc-async": { + "libraryClient": "TensorboardServiceAsyncClient", + "rpcs": { + "BatchCreateTensorboardRuns": { + "methods": [ + 
"batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + "batch_read_tensorboard_time_series_data" + ] + }, + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": [ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + }, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + 
"update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + "UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + "WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + } + } + }, + "VizierService": { + "clients": { + "grpc": { + "libraryClient": "VizierServiceClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + "CheckTrialEarlyStoppingState": { + "methods": [ + "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + "methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + }, + "grpc-async": { + "libraryClient": "VizierServiceAsyncClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + "CheckTrialEarlyStoppingState": { + "methods": [ + "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + 
"methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py new file mode 100644 index 0000000000..3597de859d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import DatasetServiceClient +from .async_client import DatasetServiceAsyncClient + +__all__ = ( + 'DatasetServiceClient', + 'DatasetServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py new file mode 100644 index 0000000000..10f105ca1d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -0,0 +1,1989 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.dataset_service import pagers +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning 
import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport +from .client import DatasetServiceClient + + +class DatasetServiceAsyncClient: + """The service that handles the CRUD of Vertex AI Dataset and + its child resources. + """ + + _client: DatasetServiceClient + + DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT + + annotation_path = staticmethod(DatasetServiceClient.annotation_path) + parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) + annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) + data_item_path = staticmethod(DatasetServiceClient.data_item_path) + parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) + dataset_path = staticmethod(DatasetServiceClient.dataset_path) + parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) + common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) + common_project_path = staticmethod(DatasetServiceClient.common_project_path) + 
parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) + common_location_path = staticmethod(DatasetServiceClient.common_location_path) + parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return DatasetServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, ~.DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = DatasetServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: Union[dataset_service.CreateDatasetRequest, dict] = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.Dataset` A + collection of DataItems and Annotations on them. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: Union[dataset_service.GetDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. + name (:class:`str`): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: Union[dataset_service.UpdateDatasetRequest, dict] = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: Union[dataset_service.ListDatasetsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists Datasets in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_datasets(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + parent (:class:`str`): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_dataset(self, + request: Union[dataset_service.DeleteDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_data(self, + request: Union[dataset_service.ImportDataRequest, dict] = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_import_data(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs: + request.import_configs.extend(import_configs) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def export_data(self, + request: Union[dataset_service.ExportDataRequest, dict] = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports data from a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_export_data(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + export_config = aiplatform_v1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`): + Required. The desired output + location. + + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_data_items(self, + request: Union[dataset_service.ListDataItemsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsAsyncPager: + r"""Lists DataItems in a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_data_items(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + parent (:class:`str`): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDataItemsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_items, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataItemsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec(self, + request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. + name (:class:`str`): + Required. The name of the AnnotationSpec resource. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AnnotationSpec: + Identifies a concept with which + DataItems may be annotated with. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_annotations(self, + request: Union[dataset_service.ListAnnotationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAnnotationsAsyncPager: + r"""Lists Annotations belongs to a dataitem + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_annotations(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]): + The request object. Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + parent (:class:`str`): + Required. The resource name of the DataItem to list + Annotations from. 
Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager: + Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListAnnotationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_annotations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAnnotationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py new file mode 100644 index 0000000000..f8b5c45dd8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -0,0 +1,2210 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.dataset_service import pagers +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import 
timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import DatasetServiceGrpcTransport +from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +class DatasetServiceClientMeta(type): + """Metaclass for the DatasetService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] + _transport_registry["grpc"] = DatasetServiceGrpcTransport + _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[DatasetServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class DatasetServiceClient(metaclass=DatasetServiceClientMeta): + """The service that handles the CRUD of Vertex AI Dataset and + its child resources. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: + """Returns a fully-qualified annotation string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) + + @staticmethod + def parse_annotation_path(path: str) -> Dict[str,str]: + """Parses a annotation path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: + """Returns a fully-qualified annotation_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + + @staticmethod + def parse_annotation_spec_path(path: str) -> Dict[str,str]: + """Parses a annotation_spec path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: + """Returns a fully-qualified data_item string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) + + @staticmethod + def parse_data_item_path(path: str) -> Dict[str,str]: + """Parses a data_item path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def 
parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DatasetServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DatasetServiceTransport): + # transport is a DatasetServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_dataset(self, + request: Union[dataset_service.CreateDatasetRequest, dict] = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + parent (str): + Required. The resource name of the Location to create + the Dataset in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.Dataset` A + collection of DataItems and Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.CreateDatasetRequest): + request = dataset_service.CreateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + def get_dataset(self, + request: Union[dataset_service.GetDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. + name (str): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.GetDatasetRequest): + request = dataset_service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset(self, + request: Union[dataset_service.UpdateDatasetRequest, dict] = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.UpdateDatasetRequest): + request = dataset_service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_datasets(self, + request: Union[dataset_service.ListDatasetsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists Datasets in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_datasets(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + parent (str): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDatasetsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDatasetsRequest): + request = dataset_service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_dataset(self, + request: Union[dataset_service.DeleteDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. + name (str): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.DeleteDatasetRequest): + request = dataset_service.DeleteDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def import_data(self, + request: Union[dataset_service.ImportDataRequest, dict] = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports data into a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_import_data(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ImportDataRequest): + request = dataset_service.ImportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs is not None: + request.import_configs = import_configs + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def export_data(self, + request: Union[dataset_service.ExportDataRequest, dict] = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports data from a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_export_data(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + export_config = aiplatform_v1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): + Required. The desired output + location. + + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ExportDataRequest): + request = dataset_service.ExportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. + return response + + def list_data_items(self, + request: Union[dataset_service.ListDataItemsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsPager: + r"""Lists DataItems in a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_data_items(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + parent (str): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDataItemsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDataItemsRequest): + request = dataset_service.ListDataItemsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_items] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataItemsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
        return response

    def get_annotation_spec(self,
            request: Optional[Union[dataset_service.GetAnnotationSpecRequest, dict]] = None,
            *,
            name: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> annotation_spec.AnnotationSpec:
        r"""Gets an AnnotationSpec.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_get_annotation_spec():
                # Create a client
                client = aiplatform_v1.DatasetServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.GetAnnotationSpecRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_annotation_spec(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]):
                The request object. Request message for
                [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec].
            name (str):
                Required. The name of the AnnotationSpec resource.
                Format:
                ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.AnnotationSpec:
                Identifies a concept with which
                DataItems may be annotated.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a dataset_service.GetAnnotationSpecRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
            request = dataset_service.GetAnnotationSpecRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_annotations(self,
            request: Optional[Union[dataset_service.ListAnnotationsRequest, dict]] = None,
            *,
            parent: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListAnnotationsPager:
        r"""Lists Annotations belonging to a dataitem.

        ..
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_annotations(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]): + The request object. Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + parent (str): + Required. The resource name of the DataItem to list + Annotations from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager: + Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListAnnotationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListAnnotationsRequest): + request = dataset_service.ListAnnotationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_annotations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAnnotationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.delete_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request. The response (Empty) is discarded; the method
        # returns None.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def cancel_operation(
        self,
        request: Optional[operations_pb2.CancelOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for
                `CancelOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.cancel_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request. The response (Empty) is discarded; the method
        # returns None. Cancellation is best-effort on the server side.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def wait_operation(
        self,
        request: Optional[operations_pb2.WaitOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Waits until the specified long-running operation is done or reaches at most
        a specified timeout, returning the latest state.

        If the operation is already done, the latest state is immediately returned.
        If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
        timeout is used. If the server does not support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.WaitOperationRequest`):
                The request object. Request message for
                `WaitOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py new file mode 100644 index 0000000000..ec4c0b1b4c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset_service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDatasetsResponse], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDataItemsResponse], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[data_item.DataItem]: + for page in self.pages: + yield from page.data_items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsAsyncPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[data_item.DataItem]: + async def async_generator(): + async for page in self.pages: + for response in page.data_items: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsPager: + """A pager for iterating through ``list_annotations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``annotations`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListAnnotationsResponse], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[annotation.Annotation]: + for page in self.pages: + yield from page.annotations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsAsyncPager: + """A pager for iterating through ``list_annotations`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``annotations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[annotation.Annotation]: + async def async_generator(): + async for page in self.pages: + for response in page.annotations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py new file mode 100644 index 0000000000..c7d3ef664d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DatasetServiceTransport +from .grpc import DatasetServiceGrpcTransport +from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] +_transport_registry['grpc'] = DatasetServiceGrpcTransport +_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport + +__all__ = ( + 'DatasetServiceTransport', + 'DatasetServiceGrpcTransport', + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py new file mode 100644 index 0000000000..de369cc83f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class DatasetServiceTransport(abc.ABC): + """Abstract transport class for DatasetService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, + default_timeout=None, + client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_timeout=None, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, + default_timeout=None, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_timeout=None, + client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_timeout=None, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, + default_timeout=None, + client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, + default_timeout=None, + client_info=client_info, + ), + self.list_data_items: gapic_v1.method.wrap_method( + self.list_data_items, + default_timeout=None, + client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_timeout=None, + client_info=client_info, + ), + self.list_annotations: gapic_v1.method.wrap_method( + self.list_annotations, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Union[ + dataset.Dataset, + Awaitable[dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Union[ + dataset_service.ListDatasetsResponse, + Awaitable[dataset_service.ListDatasetsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Union[ + dataset_service.ListDataItemsResponse, + Awaitable[dataset_service.ListDataItemsResponse] + ]]: + raise NotImplementedError() + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Union[ + annotation_spec.AnnotationSpec, + 
Awaitable[annotation_spec.AnnotationSpec] + ]]: + raise NotImplementedError() + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + Union[ + dataset_service.ListAnnotationsResponse, + Awaitable[dataset_service.ListAnnotationsResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + 
[locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'DatasetServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py new file mode 100644 index 0000000000..e98e2d4a78 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -0,0 +1,728 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO + + +class DatasetServiceGrpcTransport(DatasetServiceTransport): + """gRPC backend transport for DatasetService. + + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. 
+ + Returns: + Callable[[~.CreateDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + dataset_service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + dataset_service.ListDataItemsResponse]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + ~.ListDataItemsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an AnnotationSpec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', + request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + dataset_service.ListAnnotationsResponse]: + r"""Return a callable for the list annotations method over gRPC. + + Lists Annotations belongs to a dataitem + + Returns: + Callable[[~.ListAnnotationsRequest], + ~.ListAnnotationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_annotations' not in self._stubs: + self._stubs['list_annotations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', + request_serializer=dataset_service.ListAnnotationsRequest.serialize, + response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, + ) + return self._stubs['list_annotations'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'DatasetServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..7b5c81ce2f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -0,0 +1,727 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import DatasetServiceGrpcTransport + + +class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport): + """gRPC AsyncIO backend transport for DatasetService. + + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Awaitable[dataset_service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Awaitable[dataset_service.ListDataItemsResponse]]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + Awaitable[~.ListDataItemsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Awaitable[annotation_spec.AnnotationSpec]]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an AnnotationSpec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', + request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + Awaitable[dataset_service.ListAnnotationsResponse]]: + r"""Return a callable for the list annotations method over gRPC. + + Lists Annotations belonging to a DataItem. + + Returns: + Callable[[~.ListAnnotationsRequest], + Awaitable[~.ListAnnotationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_annotations' not in self._stubs: + self._stubs['list_annotations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', + request_serializer=dataset_service.ListAnnotationsRequest.serialize, + response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, + ) + return self._stubs['list_annotations'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py new file mode 100644 index 0000000000..3d4508fc4a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import EndpointServiceClient +from .async_client import EndpointServiceAsyncClient + +__all__ = ( + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py new file mode 100644 index 0000000000..a1987b3175 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -0,0 +1,1729 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport +from .client import EndpointServiceClient + + +class EndpointServiceAsyncClient: + """A service for managing Vertex AI's Endpoints.""" + + _client: EndpointServiceClient + + DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT + 
DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) + model_path = staticmethod(EndpointServiceClient.model_path) + parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.model_deployment_monitoring_job_path) + parse_model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.parse_model_deployment_monitoring_job_path) + network_path = staticmethod(EndpointServiceClient.network_path) + parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) + common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(EndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(EndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. 
+ + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return EndpointServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = EndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_endpoint(self, + request: Union[endpoint_service.CreateEndpointRequest, dict] = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint_id (:class:`str`): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint, endpoint_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.CreateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_endpoint(self, + request: Union[endpoint_service.GetEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] + name (:class:`str`): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.GetEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_endpoints(self, + request: Union[endpoint_service.ListEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsAsyncPager: + r"""Lists Endpoints in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_endpoints(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]): + The request object. Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.ListEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_endpoints, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_endpoint(self, + request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UpdateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_endpoint(self, + request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. + name (:class:`str`): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeleteEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def deploy_model(self, + request: Union[endpoint_service.DeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model: gca_endpoint.DeployedModel = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_deploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]): + The request object. Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource into which + to deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`): + Required. The DeployedModel to be created within the + Endpoint. Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + + This corresponds to the ``deployed_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (:class:`Mapping[str, int]`): + A map from a DeployedModel's ID to the percentage of + this Endpoint's traffic that should be forwarded to that + DeployedModel. 
+ + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the + just being deployed Model, a "0" should be used, and the + actual ID of the new DeployedModel will be filled in its + place by this method. The traffic percentage values must + add up to 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + is not updated. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.DeployModelResponse` + Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if endpoint is not None: + request.endpoint = endpoint + if deployed_model is not None: + request.deployed_model = deployed_model + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.DeployModelResponse, + metadata_type=endpoint_service.DeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model(self, + request: Union[endpoint_service.UndeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model_id: str = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_undeploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]): + The request object. Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource from which + to undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + Required. The ID of the DeployedModel + to be undeployed from the Endpoint. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (:class:`Mapping[str, int]`): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is + being undeployed from the Endpoint, the + [Endpoint.traffic_split] will always end up empty when + this call returns. A DeployedModel will be successfully + undeployed only if it doesn't have any traffic assigned + to it when this method executes, or if this field + unassigns any traffic to it. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py new file mode 100644 index 0000000000..cc27a764c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -0,0 +1,1948 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import EndpointServiceGrpcTransport +from .transports.grpc_asyncio import 
EndpointServiceGrpcAsyncIOTransport + + +class EndpointServiceClientMeta(type): + """Metaclass for the EndpointService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] + _transport_registry["grpc"] = EndpointServiceGrpcTransport + _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[EndpointServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class EndpointServiceClient(metaclass=EndpointServiceClientMeta): + """A service for managing Vertex AI's Endpoints.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: + """Returns a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: + """Parses a model_deployment_monitoring_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> str: + """Returns a fully-qualified network string.""" + return 
"projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m 
else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, EndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, EndpointServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, EndpointServiceTransport): + # transport is a EndpointServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_endpoint(self, + request: Union[endpoint_service.CreateEndpointRequest, dict] = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + parent (str): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (google.cloud.aiplatform_v1.types.Endpoint): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint, endpoint_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.CreateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.CreateEndpointRequest): + request = endpoint_service.CreateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_endpoint(self, + request: Union[endpoint_service.GetEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. 
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.GetEndpointRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.GetEndpointRequest):
            request = endpoint_service.GetEndpointRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_endpoint]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    # NOTE(review): generated code — `request`, `parent` and `timeout` default
    # to None without an Optional[...] annotation; newer generator versions
    # emit Optional here. Left untouched to stay in sync with the generator.
    def list_endpoints(self,
            request: Union[endpoint_service.ListEndpointsRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListEndpointsPager:
        r"""Lists Endpoints in a Location.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_list_endpoints():
                # Create a client
                client = aiplatform_v1.EndpointServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.ListEndpointsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_endpoints(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]):
                The request object. Request message for
                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
            parent (str):
                Required. The resource name of the Location from which
                to list the Endpoints. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager:
                Response message for
                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.ListEndpointsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.ListEndpointsRequest):
            request = endpoint_service.ListEndpointsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_endpoints]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListEndpointsPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def update_endpoint(self,
            request: Union[endpoint_service.UpdateEndpointRequest, dict] = None,
            *,
            endpoint: gca_endpoint.Endpoint = None,
            update_mask: field_mask_pb2.FieldMask = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gca_endpoint.Endpoint:
        r"""Updates an Endpoint.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_update_endpoint():
                # Create a client
                client = aiplatform_v1.EndpointServiceClient()

                # Initialize request argument(s)
                endpoint = aiplatform_v1.Endpoint()
                endpoint.display_name = "display_name_value"

                request = aiplatform_v1.UpdateEndpointRequest(
                    endpoint=endpoint,
                )

                # Make the request
                response = client.update_endpoint(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
            endpoint (google.cloud.aiplatform_v1.types.Endpoint):
                Required. The Endpoint which replaces
                the resource on the server.

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                Required. The update mask applies to the resource. See
                [google.protobuf.FieldMask][google.protobuf.FieldMask].

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.Endpoint:
                Models are deployed into it, and
                afterwards Endpoint is called to obtain
                predictions and explanations.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.UpdateEndpointRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.UpdateEndpointRequest):
            request = endpoint_service.UpdateEndpointRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_endpoint]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint.name", request.endpoint.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def delete_endpoint(self,
            request: Union[endpoint_service.DeleteEndpointRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Deletes an Endpoint.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_delete_endpoint():
                # Create a client
                client = aiplatform_v1.EndpointServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.DeleteEndpointRequest(
                    name="name_value",
                )

                # Make the request
                operation = client.delete_endpoint(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint].
            name (str):
                Required. The name of the Endpoint resource to be
                deleted. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:

                   service Foo {
                       rpc Bar(google.protobuf.Empty) returns
                       (google.protobuf.Empty);

                   }

                The JSON representation for Empty is empty JSON
                object {}.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.DeleteEndpointRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.DeleteEndpointRequest):
            request = endpoint_service.DeleteEndpointRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete_endpoint]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response

    def deploy_model(self,
            request: Union[endpoint_service.DeployModelRequest, dict] = None,
            *,
            endpoint: str = None,
            deployed_model: gca_endpoint.DeployedModel = None,
            traffic_split: Mapping[str, int] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Deploys a Model into this Endpoint, creating a
        DeployedModel within it.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_deploy_model():
                # Create a client
                client = aiplatform_v1.EndpointServiceClient()

                # Initialize request argument(s)
                deployed_model = aiplatform_v1.DeployedModel()
                deployed_model.dedicated_resources.min_replica_count = 1803
                deployed_model.model = "model_value"

                request = aiplatform_v1.DeployModelRequest(
                    endpoint="endpoint_value",
                    deployed_model=deployed_model,
                )

                # Make the request
                operation = client.deploy_model(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]):
                The request object. Request message for
                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
            endpoint (str):
                Required. The name of the Endpoint resource into which
                to deploy a Model. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
                Required. The DeployedModel to be created within the
                Endpoint. Note that
                [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                must be updated for the DeployedModel to start receiving
                traffic, either as part of this call, or via
                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].

                This corresponds to the ``deployed_model`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            traffic_split (Mapping[str, int]):
                A map from a DeployedModel's ID to the percentage of
                this Endpoint's traffic that should be forwarded to that
                DeployedModel.

                If this field is non-empty, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. To refer to the ID of the
                just being deployed Model, a "0" should be used, and the
                actual ID of the new DeployedModel will be filled in its
                place by this method. The traffic percentage values must
                add up to 100.

                If this field is empty, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                is not updated.

                This corresponds to the ``traffic_split`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
                Response message for
                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, deployed_model, traffic_split])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.DeployModelRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.DeployModelRequest):
            request = endpoint_service.DeployModelRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if deployed_model is not None:
            request.deployed_model = deployed_model
        if traffic_split is not None:
            request.traffic_split = traffic_split

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.deploy_model]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint", request.endpoint),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            endpoint_service.DeployModelResponse,
            metadata_type=endpoint_service.DeployModelOperationMetadata,
        )

        # Done; return the response.
        return response

    def undeploy_model(self,
            request: Union[endpoint_service.UndeployModelRequest, dict] = None,
            *,
            endpoint: str = None,
            deployed_model_id: str = None,
            traffic_split: Mapping[str, int] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Undeploys a Model from an Endpoint, removing a
        DeployedModel from it, and freeing all resources it's
        using.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_undeploy_model():
                # Create a client
                client = aiplatform_v1.EndpointServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.UndeployModelRequest(
                    endpoint="endpoint_value",
                    deployed_model_id="deployed_model_id_value",
                )

                # Make the request
                operation = client.undeploy_model(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]):
                The request object. Request message for
                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
            endpoint (str):
                Required. The name of the Endpoint resource from which
                to undeploy a Model. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deployed_model_id (str):
                Required. The ID of the DeployedModel
                to be undeployed from the Endpoint.

                This corresponds to the ``deployed_model_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            traffic_split (Mapping[str, int]):
                If this field is provided, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. If last DeployedModel is
                being undeployed from the Endpoint, the
                [Endpoint.traffic_split] will always end up empty when
                this call returns. A DeployedModel will be successfully
                undeployed only if it doesn't have any traffic assigned
                to it when this method executes, or if this field
                unassigns any traffic to it.

                This corresponds to the ``traffic_split`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`
                Response message for
                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a endpoint_service.UndeployModelRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, endpoint_service.UndeployModelRequest):
            request = endpoint_service.UndeployModelRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if deployed_model_id is not None:
            request.deployed_model_id = deployed_model_id
        if traffic_split is not None:
            request.traffic_split = traffic_split

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.undeploy_model]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint", request.endpoint),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            endpoint_service.UndeployModelResponse,
            metadata_type=endpoint_service.UndeployModelOperationMetadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()

    def list_operations(
        self,
        request: operations_pb2.ListOperationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.

        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for
                `ListOperations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.list_operations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def get_operation(
        self,
        request: operations_pb2.GetOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.

        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for
                `GetOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def delete_operation(
        self,
        request: operations_pb2.DeleteOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes a long-running operation.

        This method indicates that the client is no longer interested
        in the operation result. It does not cancel the operation.
        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.DeleteOperationRequest`):
                The request object. Request message for
                `DeleteOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.delete_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for
                `CancelOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.cancel_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def wait_operation(
        self,
        request: operations_pb2.WaitOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Waits until the specified long-running operation is done or reaches at most
        a specified timeout, returning the latest state.

        If the operation is already done, the latest state is immediately returned.
        If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
        timeout is used. If the server does not support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.WaitOperationRequest`):
                The request object. Request message for
                `WaitOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.WaitOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.wait_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.locations_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py new file mode 100644 index 0000000000..9521719622 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint_service + + +class ListEndpointsPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., endpoint_service.ListEndpointsResponse], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[endpoint.Endpoint]: + for page in self.pages: + yield from page.endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEndpointsAsyncPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``endpoints`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..e1f410c28c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import EndpointServiceTransport +from .grpc import EndpointServiceGrpcTransport +from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] +_transport_registry['grpc'] = EndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'EndpointServiceTransport', + 'EndpointServiceGrpcTransport', + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py new file mode 100644 index 0000000000..481036e3f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class EndpointServiceTransport(abc.ABC): + """Abstract transport class for EndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_endpoint: gapic_v1.method.wrap_method( + self.create_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.get_endpoint: gapic_v1.method.wrap_method( + self.get_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.list_endpoints: gapic_v1.method.wrap_method( + self.list_endpoints, + default_timeout=None, + client_info=client_info, + ), + self.update_endpoint: gapic_v1.method.wrap_method( + self.update_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.delete_endpoint: gapic_v1.method.wrap_method( + self.delete_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, + default_timeout=None, + client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Union[ + endpoint.Endpoint, + Awaitable[endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Union[ + endpoint_service.ListEndpointsResponse, + Awaitable[endpoint_service.ListEndpointsResponse] + ]]: + raise NotImplementedError() + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Union[ + gca_endpoint.Endpoint, + Awaitable[gca_endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'EndpointServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..6253c55e45 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class EndpointServiceGrpcTransport(EndpointServiceTransport): + """gRPC backend transport for EndpointService. 
+ + A service for managing Vertex AI's Endpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + endpoint.Endpoint]: + r"""Return a callable for the get endpoint method over gRPC. 
+ + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + endpoint_service.ListEndpointsResponse]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + ~.ListEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + gca_endpoint.Endpoint]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'EndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..cefb5eee93 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import EndpointServiceGrpcTransport + + +class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): + """gRPC AsyncIO backend transport for EndpointService. + + A service for managing Vertex AI's Endpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Awaitable[endpoint.Endpoint]]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Awaitable[endpoint_service.ListEndpointsResponse]]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + Awaitable[~.ListEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Awaitable[gca_endpoint.Endpoint]]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py new file mode 100644 index 0000000000..ef931462a5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import FeaturestoreOnlineServingServiceClient +from .async_client import FeaturestoreOnlineServingServiceAsyncClient + +__all__ = ( + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py new file mode 100644 index 0000000000..44ccac2510 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -0,0 +1,1058 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport +from .client import FeaturestoreOnlineServingServiceClient + + +class FeaturestoreOnlineServingServiceAsyncClient: + """A service for serving online feature values.""" + + _client: FeaturestoreOnlineServingServiceClient + + DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path) + parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path) + common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path) + parse_common_billing_account_path = 
staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path) + common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path) + parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path) + common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path) + parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
+ """ + return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return FeaturestoreOnlineServingServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreOnlineServingServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def read_feature_values(self, + request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = await client.read_feature_values(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType for the + entity being read. 
Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_online_service.ReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def streaming_read_feature_values(self, + request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = await client.streaming_read_feature_values(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (:class:`str`): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. 
+ + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py new file mode 100644 index 0000000000..336cd60eff --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -0,0 +1,1252 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +class FeaturestoreOnlineServingServiceClientMeta(type): + """Metaclass for the FeaturestoreOnlineServingService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] + _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport + _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[FeaturestoreOnlineServingServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta): + """A service for serving online feature values.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str,str]: + """Parses a entity_type path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreOnlineServingServiceTransport): + # transport is a FeaturestoreOnlineServingServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def read_feature_values(self, + request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): + The request object. 
Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType for the + entity being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.ReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest): + request = featurestore_online_service.ReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def streaming_read_feature_values(self, + request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]): + The request object. 
Request message for
+                [FeaturestoreOnlineServingService.StreamingReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues].
+            entity_type (str):
+                Required. The resource name of the entities' type. Value
+                format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                ``user``.
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a featurestore_online_service.StreamingReadFeatureValuesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest):
+            request = featurestore_online_service.StreamingReadFeatureValuesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+ if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py new file mode 100644 index 0000000000..f1db8ba8e1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreOnlineServingServiceTransport +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] +_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport +_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + +__all__ = ( + 'FeaturestoreOnlineServingServiceTransport', + 'FeaturestoreOnlineServingServiceGrpcTransport', + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py new file mode 100644 index 0000000000..34c8fea0f9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import featurestore_online_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2

# Report the installed distribution's version in the user-agent header;
# fall back to a bare ClientInfo when the package metadata is unavailable
# (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class FeaturestoreOnlineServingServiceTransport(abc.ABC):
    """Abstract transport class for FeaturestoreOnlineServingService.

    This base class resolves credentials and scopes; concrete subclasses
    implement each RPC property as a callable stub. Each abstract RPC
    property is typed as a Union so a sync transport may return the
    response directly while an async transport returns an Awaitable.
    """

    # OAuth scopes requested when credentials are obtained from the
    # environment via google.auth.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() check guards against older google-auth versions that
        # do not support with_always_use_jwt_access.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Wrapping attaches retry/timeout and
        # user-agent metadata handling around the raw stub callables.
        self._wrapped_methods = {
            self.read_feature_values: gapic_v1.method.wrap_method(
                self.read_feature_values,
                default_timeout=None,
                client_info=client_info,
            ),
            self.streaming_read_feature_values: gapic_v1.method.wrap_method(
                self.streaming_read_feature_values,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # --- Service RPCs (implemented by concrete transports) ---

    @property
    def read_feature_values(self) -> Callable[
            [featurestore_online_service.ReadFeatureValuesRequest],
            Union[
                featurestore_online_service.ReadFeatureValuesResponse,
                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def streaming_read_feature_values(self) -> Callable[
            [featurestore_online_service.StreamingReadFeatureValuesRequest],
            Union[
                featurestore_online_service.ReadFeatureValuesResponse,
                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
            ]]:
        raise NotImplementedError()

    # --- Long-running operations mixin RPCs ---

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    # --- IAM mixin RPCs ---

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    # --- Locations mixin RPCs ---

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short identifier for the transport type, e.g. "grpc".
        raise NotImplementedError()


__all__ = (
    'FeaturestoreOnlineServingServiceTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.aiplatform_v1.types import featurestore_online_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2
from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO


class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport):
    """gRPC backend transport for FeaturestoreOnlineServingService.

    A service for serving online feature values.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of stub callables keyed by method name; populated lazily by the
    # RPC properties below.
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'aiplatform.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[grpc.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
                       host: str = 'aiplatform.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def read_feature_values(self) -> Callable[
            [featurestore_online_service.ReadFeatureValuesRequest],
            featurestore_online_service.ReadFeatureValuesResponse]:
        r"""Return a callable for the read feature values method over gRPC.

        Reads Feature values of a specific entity of an
        EntityType. For reading feature values of multiple
        entities of an EntityType, please use
        StreamingReadFeatureValues.

        Returns:
            Callable[[~.ReadFeatureValuesRequest],
                    ~.ReadFeatureValuesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'read_feature_values' not in self._stubs:
            self._stubs['read_feature_values'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues',
                request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize,
                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
            )
        return self._stubs['read_feature_values']

    @property
    def streaming_read_feature_values(self) -> Callable[
            [featurestore_online_service.StreamingReadFeatureValuesRequest],
            featurestore_online_service.ReadFeatureValuesResponse]:
        r"""Return a callable for the streaming read feature values method over gRPC.

        Reads Feature values for multiple entities. Depending
        on their size, data for different entities may be broken
        up across multiple responses.

        Returns:
            Callable[[~.StreamingReadFeatureValuesRequest],
                    ~.ReadFeatureValuesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'streaming_read_feature_values' not in self._stubs:
            self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream(
                '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues',
                request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize,
                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
            )
        return self._stubs['streaming_read_feature_values']

    def close(self):
        # Closing the channel tears down every stub created from it.
        self.grpc_channel.close()

    @property
    def delete_operation(
        self,
    ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
        r"""Return a callable for the delete_operation method over gRPC.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_operation" not in self._stubs:
            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/DeleteOperation",
                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["delete_operation"]

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        r"""Return a callable for the cancel_operation method over gRPC.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]

    @property
    def list_locations(
        self,
    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
        r"""Return a callable for the list locations method over gRPC.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_locations" not in self._stubs:
            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/ListLocations",
                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
            )
        return self._stubs["list_locations"]

    @property
    def get_location(
        self,
    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
        r"""Return a callable for the get location method over gRPC.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_location" not in self._stubs:
            self._stubs["get_location"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/GetLocation",
                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
                response_deserializer=locations_pb2.Location.FromString,
            )
        return self._stubs["get_location"]

    @property
    def set_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
        r"""Return a callable for the set iam policy method over gRPC.
        Sets the IAM access control policy on the specified
        function. Replaces any existing policy.
        Returns:
            Callable[[~.SetIamPolicyRequest],
                    ~.Policy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "set_iam_policy" not in self._stubs:
            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/SetIamPolicy",
                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["set_iam_policy"]

    @property
    def get_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
        r"""Return a callable for the get iam policy method over gRPC.
        Gets the IAM access control policy for a function.
        Returns an empty policy if the function exists and does
        not have a policy set.
        Returns:
            Callable[[~.GetIamPolicyRequest],
                    ~.Policy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_iam_policy" not in self._stubs:
            self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/GetIamPolicy",
                request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["get_iam_policy"]

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse
    ]:
        r"""Return a callable for the test iam permissions method over gRPC.
        Tests the specified permissions against the IAM access control
        policy for a function. If the function does not exist, this will
        return an empty set of permissions, not a NOT_FOUND error.
        Returns:
            Callable[[~.TestIamPermissionsRequest],
                    ~.TestIamPermissionsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "test_iam_permissions" not in self._stubs:
            self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/TestIamPermissions",
                request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
                response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
            )
        return self._stubs["test_iam_permissions"]

    @property
    def kind(self) -> str:
        # Identifies this transport implementation to the client factory.
        return "grpc"


__all__ = (
    'FeaturestoreOnlineServingServiceGrpcTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport + + +class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_feature_values(self) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. 
For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_feature_values' not in self._stubs: + self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues', + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['read_feature_values'] + + @property + def streaming_read_feature_values(self) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'streaming_read_feature_values' not in self._stubs: + self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['streaming_read_feature_values'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_operations" not in self._stubs:
+ self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/ListOperations",
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+ )
+ return self._stubs["list_operations"]
+
+ @property
+ def list_locations(
+ self,
+ ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+ r"""Return a callable for the list locations method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_locations" not in self._stubs:
+ self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/ListLocations",
+ request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+ response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+ )
+ return self._stubs["list_locations"]
+
+ @property
+ def get_location(
+ self,
+ ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+ r"""Return a callable for the get location method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py new file mode 100644 index 0000000000..b8542bb22c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import FeaturestoreServiceClient +from .async_client import FeaturestoreServiceAsyncClient + +__all__ = ( + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py new file mode 100644 index 0000000000..0335927ca3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -0,0 +1,3377 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import featurestore_monitoring +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import 
FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport +from .client import FeaturestoreServiceClient + + +class FeaturestoreServiceAsyncClient: + """The service that handles CRUD and List for resources for + Featurestore. + """ + + _client: FeaturestoreServiceClient + + DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) + parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) + feature_path = staticmethod(FeaturestoreServiceClient.feature_path) + parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) + featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) + parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) + common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) + common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) + parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) + common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) + parse_common_location_path = staticmethod(FeaturestoreServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, 
 **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ FeaturestoreServiceAsyncClient: The constructed client.
+ """
+ return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ FeaturestoreServiceAsyncClient: The constructed client.
+ """
+ return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore
+
+ from_service_account_json = from_service_account_file
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return FeaturestoreServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
+ @property
+ def transport(self) -> FeaturestoreServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ FeaturestoreServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient))
+
+ def __init__(self, *,
+ credentials: ga_credentials.Credentials = None,
+ transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the featurestore service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.FeaturestoreServiceTransport]): The
+ transport to use.
If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_featurestore(self, + request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Featurestore in a given project and + location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. + parent (:class:`str`): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore_id (:class:`str`): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore, featurestore_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_featurestore(self, + request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. + name (:class:`str`): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Featurestore: + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. The + Featurestore is a top-level container + for your features and their values. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_featurestores(self, + request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresAsyncPager: + r"""Lists Featurestores in a given project and location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_featurestores(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + parent (:class:`str`): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_featurestores, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturestoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_featurestore(self, + request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Featurestore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. + featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore.name", request.featurestore.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_featurestore(self, + request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. + name (:class:`str`): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. 
(Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_featurestore, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_entity_type(self, + request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new EntityType in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. 
+ parent (:class:`str`): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type_id (:class:`str`): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, entity_type, entity_type_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_entity_type(self, + request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. + name (:class:`str`): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_entity_types(self, + request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesAsyncPager: + r"""Lists EntityTypes in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_entity_types(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + parent (:class:`str`): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_entity_types, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEntityTypesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_entity_type(self, + request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateEntityTypeRequest( + ) + + # Make the request + response = await client.update_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type.name", request.entity_type.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_entity_type(self, + request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteEntityTypes][]. + name (:class:`str`): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_entity_type, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_feature(self, + request: Union[featurestore_service.CreateFeatureRequest, dict] = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + feature_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Feature in a given EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (:class:`google.cloud.aiplatform_v1.types.Feature`): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature_id (:class:`str`): + Required. 
The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature, feature_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + if feature_id is not None: + request.feature_id = feature_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_create_features(self, + request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a batch of Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_create_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]`): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_feature(self, + request: Union[featurestore_service.GetFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. + name (:class:`str`): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. 
For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_features(self, + request: Union[featurestore_service.ListFeaturesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesAsyncPager: + r"""Lists Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + parent (:class:`str`): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_feature(self, + request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = await client.update_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + feature (:class:`google.cloud.aiplatform_v1.types.Feature`): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("feature.name", request.feature.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_feature(self, + request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Feature. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. + name (:class:`str`): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_feature, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_feature_values(self, + request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. 
+ If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_import_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. 
+ Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_read_feature_values(self, + request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, + *, + featurestore: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (:class:`str`): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def export_feature_values(self, + request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports Feature values from all the entities of a + target EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_export_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def search_features(self, + request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, + *, + location: str = None, + query: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesAsyncPager: + r"""Searches Features matching a query in a given + project. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_search_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + location (:class:`str`): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. 
The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. 
+ + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location, query]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if location is not None: + request.location = location + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_features, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py new file mode 100644 index 0000000000..8ab88cc7f6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -0,0 +1,3589 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.api_core import operation as gac_operation  # type: ignore
from google.api_core import operation_async  # type: ignore
from google.cloud.aiplatform_v1.services.featurestore_service import pagers
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import entity_type
from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1.types import feature
from google.cloud.aiplatform_v1.types import feature as gca_feature
from google.cloud.aiplatform_v1.types import featurestore
from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore
from google.cloud.aiplatform_v1.types import featurestore_monitoring
from google.cloud.aiplatform_v1.types import featurestore_service
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2  # type: ignore
from google.protobuf import field_mask_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore
from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeaturestoreServiceGrpcTransport
from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport


class FeaturestoreServiceClientMeta(type):
    """Metaclass for the FeaturestoreService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[FeaturestoreServiceTransport]]
    _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[FeaturestoreServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta):
    """The service that handles CRUD and List for resources for
    Featurestore.
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE: named groups are required here — the unpack below relies on
        # the four groups matching in this order.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            FeaturestoreServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            FeaturestoreServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> FeaturestoreServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            FeaturestoreServiceTransport: The transport used by the client
                instance.
        """
        return self._transport

    @staticmethod
    def entity_type_path(project: str, location: str, featurestore: str, entity_type: str, ) -> str:
        """Returns a fully-qualified entity_type string."""
        return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, )

    @staticmethod
    def parse_entity_type_path(path: str) -> Dict[str, str]:
        """Parses a entity_type path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/featurestores/(?P<featurestore>.+?)/entityTypes/(?P<entity_type>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def feature_path(project: str, location: str, featurestore: str, entity_type: str, feature: str, ) -> str:
        """Returns a fully-qualified feature string."""
        return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, )

    @staticmethod
    def parse_feature_path(path: str) -> Dict[str, str]:
        """Parses a feature path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/featurestores/(?P<featurestore>.+?)/entityTypes/(?P<entity_type>.+?)/features/(?P<feature>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def featurestore_path(project: str, location: str, featurestore: str, ) -> str:
        """Returns a fully-qualified featurestore string."""
        return "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, )

    @staticmethod
    def parse_featurestore_path(path: str) -> Dict[str, str]:
        """Parses a featurestore path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/featurestores/(?P<featurestore>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    @classmethod
    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, FeaturestoreServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the featurestore service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, FeaturestoreServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options)

        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError("client_options.api_key and credentials are mutually exclusive")

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, FeaturestoreServiceTransport):
            # transport is a FeaturestoreServiceTransport instance.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore

            if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"):
                credentials = google.auth._default.get_api_key_credentials(api_key_value)

            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def create_featurestore(self,
            request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None,
            *,
            parent: str = None,
            featurestore: gca_featurestore.Featurestore = None,
            featurestore_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Creates a new Featurestore in a given project and
        location.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore].
            parent (str):
                Required. The resource name of the Location to create
                Featurestores. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            featurestore (google.cloud.aiplatform_v1.types.Featurestore):
                Required. The Featurestore to create.
                This corresponds to the ``featurestore`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            featurestore_id (str):
                Required. The ID to use for this Featurestore, which
                will become the final component of the Featurestore's
                resource name. This value may be up to 60 characters,
                and valid characters are ``[a-z0-9_]``. The first
                character cannot be a number. The value must be unique
                within the project and location.

                This corresponds to the ``featurestore_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation. The result
                type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.Featurestore`.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, featurestore, featurestore_id])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.CreateFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.CreateFeaturestoreRequest):
            request = featurestore_service.CreateFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if featurestore is not None:
                request.featurestore = featurestore
            if featurestore_id is not None:
                request.featurestore_id = featurestore_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_featurestore.Featurestore,
            metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata,
        )

        # Done; return the response.
        return response

    def get_featurestore(self,
            request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> featurestore.Featurestore:
        r"""Gets details of a single Featurestore.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore].
            name (str):
                Required. The name of the Featurestore resource.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.Featurestore:
                Vertex AI Feature Store provides a centralized repository for
                organizing, storing, and serving ML features.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.GetFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.GetFeaturestoreRequest):
            request = featurestore_service.GetFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_featurestores(self,
            request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListFeaturestoresPager:
        r"""Lists Featurestores in a given project and location.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
            parent (str):
                Required. The resource name of the Location to list
                Featurestores. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresPager:
                Response message for
                [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores].
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.ListFeaturestoresRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.ListFeaturestoresRequest):
            request = featurestore_service.ListFeaturestoresRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_featurestores]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListFeaturestoresPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def update_featurestore(self,
            request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None,
            *,
            featurestore: gca_featurestore.Featurestore = None,
            update_mask: field_mask_pb2.FieldMask = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Updates the parameters of a single Featurestore.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore].
            featurestore (google.cloud.aiplatform_v1.types.Featurestore):
                Required. The Featurestore's ``name`` field is used to
                identify the Featurestore to be updated. Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}``

                This corresponds to the ``featurestore`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                Field mask is used to specify the fields to be
                overwritten in the Featurestore resource by the update.
                The fields specified in the update_mask are relative to
                the resource, not the full request. A field will be
                overwritten if it is in the mask. If the user does not
                provide a mask then only the non-empty fields present in
                the request will be overwritten. Set the update_mask to
                ``*`` to override all fields.

                Updatable fields:

                -  ``labels``
                -  ``online_serving_config.fixed_node_count``
                -  ``online_serving_config.scaling``

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation. The result
                type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.Featurestore`.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([featurestore, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.UpdateFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest):
            request = featurestore_service.UpdateFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if featurestore is not None:
                request.featurestore = featurestore
            if update_mask is not None:
                request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("featurestore.name", request.featurestore.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_featurestore.Featurestore,
            metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata,
        )

        # Done; return the response.
        return response

    def delete_featurestore(self,
            request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None,
            *,
            name: str = None,
            force: bool = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Deletes a single Featurestore. The Featurestore must not contain
        any EntityTypes or ``force`` must be set to true for the request
        to succeed.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore].
            name (str):
                Required. The name of the Featurestore to be deleted.
                Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            force (bool):
                If set to true, any EntityTypes and
                Features for this Featurestore will also
                be deleted. (Otherwise, the request will
                only work if the Featurestore has no
                EntityTypes.)

                This corresponds to the ``force`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation. The result
                type for the operation will be
                :class:`google.protobuf.empty_pb2.Empty`.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name, force])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.DeleteFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest):
            request = featurestore_service.DeleteFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
            if force is not None:
                request.force = force

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response
Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. + parent (str): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (google.cloud.aiplatform_v1.types.EntityType): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, entity_type, entity_type_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateEntityTypeRequest): + request = featurestore_service.CreateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + def get_entity_type(self, + request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. 
+ + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = client.get_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetEntityTypeRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetEntityTypeRequest): + request = featurestore_service.GetEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_entity_types(self, + request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesPager: + r"""Lists EntityTypes in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_entity_types(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + parent (str): + Required. 
The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListEntityTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListEntityTypesRequest): + request = featurestore_service.ListEntityTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_entity_types] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEntityTypesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_entity_type(self, + request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateEntityTypeRequest( + ) + + # Make the request + response = client.update_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + entity_type (google.cloud.aiplatform_v1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): + request = featurestore_service.UpdateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type.name", request.entity_type.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_entity_type(self, + request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType]. + name (str): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (bool): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method.
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): + request = featurestore_service.DeleteEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_feature(self, + request: Union[featurestore_service.CreateFeatureRequest, dict] = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + feature_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Feature in a given EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + parent (str): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature_id (str): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. 
+ + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature, feature_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeatureRequest): + request = featurestore_service.CreateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + if feature_id is not None: + request.feature_id = feature_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_create_features(self, + request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a batch of Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_create_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + parent (str): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchCreateFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): + request = featurestore_service.BatchCreateFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            featurestore_service.BatchCreateFeaturesResponse,
+            metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_feature(self,
+            request: Union[featurestore_service.GetFeatureRequest, dict] = None,
+            *,
+            name: str = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> feature.Feature:
+        r"""Gets details of a single Feature.
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1
+
+            def sample_get_feature():
+                # Create a client
+                client = aiplatform_v1.FeaturestoreServiceClient()
+
+                # Initialize request argument(s)
+                request = aiplatform_v1.GetFeatureRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                response = client.get_feature(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature].
+            name (str):
+                Required. The name of the Feature resource. Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.Feature:
+                Feature Metadata information that
+                describes an attribute of an entity
+                type. For example, apple is an entity
+                type, and color is a feature that
+                describes apple.
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetFeatureRequest): + request = featurestore_service.GetFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_features(self, + request: Union[featurestore_service.ListFeaturesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesPager: + r"""Lists Features in a given EntityType. + + .. 
code-block:: python
+
+            from google.cloud import aiplatform_v1
+
+            def sample_list_features():
+                # Create a client
+                client = aiplatform_v1.FeaturestoreServiceClient()
+
+                # Initialize request argument(s)
+                request = aiplatform_v1.ListFeaturesRequest(
+                    parent="parent_value",
+                )
+
+                # Make the request
+                page_result = client.list_features(request=request)
+
+                # Handle the response
+                for response in page_result:
+                    print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
+            parent (str):
+                Required. The resource name of the EntityType to list
+                Features. Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesPager:
+                Response message for
+                [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturesRequest): + request = featurestore_service.ListFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListFeaturesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_feature(self, + request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = client.update_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeatureRequest): + request = featurestore_service.UpdateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("feature.name", request.feature.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+        return response
+
+    def delete_feature(self,
+            request: Union[featurestore_service.DeleteFeatureRequest, dict] = None,
+            *,
+            name: str = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a single Feature.
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1
+
+            def sample_delete_feature():
+                # Create a client
+                client = aiplatform_v1.FeaturestoreServiceClient()
+
+                # Initialize request argument(s)
+                request = aiplatform_v1.DeleteFeatureRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                operation = client.delete_feature(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = operation.result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature].
+            name (str):
+                Required. The name of the Feature to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs.
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeatureRequest): + request = featurestore_service.DeleteFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def import_feature_values(self, + request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_import_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ImportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): + request = featurestore_service.ImportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def batch_read_feature_values(self, + request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, + *, + featurestore: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (str): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): + request = featurestore_service.BatchReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def export_feature_values(self, + request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports Feature values from all the entities of a + target EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_export_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ExportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): + request = featurestore_service.ExportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
    def search_features(self,
            request: Union[featurestore_service.SearchFeaturesRequest, dict] = None,
            *,
            location: str = None,
            query: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.SearchFeaturesPager:
        r"""Searches Features matching a query in a given
        project.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_search_features():
                # Create a client
                client = aiplatform_v1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.SearchFeaturesRequest(
                    location="location_value",
                )

                # Make the request
                page_result = client.search_features(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].
            location (str):
                Required. The resource name of the Location to search
                Features. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``location`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            query (str):
                Query string that is a conjunction of field-restricted
                queries and/or field-restricted filters.
                Field-restricted queries and filters can be combined
                using ``AND`` to form a conjunction.

                A field query is in the form FIELD:QUERY. This
                implicitly checks if QUERY exists as a substring within
                Feature's FIELD. The QUERY and the FIELD are converted
                to a sequence of words (i.e. tokens) for comparison.
                This is done by:

                -  Removing leading/trailing whitespace and tokenizing
                   the search value. Characters that are not one of
                   alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or
                   asterisk ``*`` are treated as delimiters for tokens.
                   ``*`` is treated as a wildcard that matches
                   characters within a token.
                -  Ignoring case.
                -  Prepending an asterisk to the first and appending an
                   asterisk to the last token in QUERY.

                A QUERY must be either a singular token or a phrase. A
                phrase is one or multiple words enclosed in double
                quotation marks ("). With phrases, the order of the
                words is important. Words in the phrase must be matching
                in order and consecutively.

                Supported FIELDs for field-restricted queries:

                -  ``feature_id``
                -  ``description``
                -  ``entity_type_id``

                Examples:

                -  ``feature_id: foo`` --> Matches a Feature with ID
                   containing the substring ``foo`` (eg. ``foo``,
                   ``foofeature``, ``barfoo``).
                -  ``feature_id: foo*feature`` --> Matches a Feature
                   with ID containing the substring ``foo*feature`` (eg.
                   ``foobarfeature``).
                -  ``feature_id: foo AND description: bar`` --> Matches
                   a Feature with ID containing the substring ``foo``
                   and description containing the substring ``bar``.

                Besides field queries, the following exact-match filters
                are supported. The exact-match filters do not support
                wildcards. Unlike field-restricted queries, exact-match
                filters are case-sensitive.

                -  ``feature_id``: Supports = comparisons.
                -  ``description``: Supports = comparisons. Multi-token
                   filters should be enclosed in quotes.
                -  ``entity_type_id``: Supports = comparisons.
                -  ``value_type``: Supports = and != comparisons.
                -  ``labels``: Supports key-value equality as well as
                   key presence.
                -  ``featurestore_id``: Supports = comparisons.

                Examples:

                -  ``description = "foo bar"`` --> Any Feature with
                   description exactly equal to ``foo bar``
                -  ``value_type = DOUBLE`` --> Features whose type is
                   DOUBLE.
                -  ``labels.active = yes AND labels.env = prod`` -->
                   Features having both (active: yes) and (env: prod)
                   labels.
                -  ``labels.env: *`` --> Any Feature which has a label
                   with ``env`` as the key.

                This corresponds to the ``query`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesPager:
                Response message for
                [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # NOTE: `request` and the flattened fields (`location`, `query`) are
        # mutually exclusive by design; mixing them raises ValueError.
        has_flattened_params = any([location, query])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.SearchFeaturesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.SearchFeaturesRequest):
            request = featurestore_service.SearchFeaturesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if location is not None:
                request.location = location
            if query is not None:
                request.query = query

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.search_features]

        # Certain fields should be provided within the metadata header;
        # add these here.
        # The ``location`` field is attached as request-routing metadata.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("location", request.location),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method that fetches further pages lazily.
        response = pagers.SearchFeaturesPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response
+ rpc = self._transport._wrapped_methods[self._transport.search_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchFeaturesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py new file mode 100644 index 0000000000..0c63022513 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service + + +class ListFeaturestoresPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``featurestores`` field. 
class ListFeaturestoresPager:
    """Synchronous pager for ``list_featurestores`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` and
    exposes an ``__iter__`` method over its ``featurestores`` field; when a
    page is exhausted, the next ``ListFeaturestores`` request is issued
    automatically.

    Attribute access is proxied to the most recent response, so all the
    usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., featurestore_service.ListFeaturestoresResponse],
            request: featurestore_service.ListFeaturestoresRequest,
            response: featurestore_service.ListFeaturestoresResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token updates do not mutate the caller's.
        self._request = featurestore_service.ListFeaturestoresRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are resolved against the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[featurestore.Featurestore]:
        # Flatten the per-page ``featurestores`` lists into one stream.
        return (item for page in self.pages for item in page.featurestores)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListFeaturestoresAsyncPager:
    """Asynchronous pager for ``list_featurestores`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` and
    exposes an ``__aiter__`` method over its ``featurestores`` field; when a
    page is exhausted, the next ``ListFeaturestores`` request is awaited
    automatically.

    Attribute access is proxied to the most recent response, so all the
    usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]],
            request: featurestore_service.ListFeaturestoresRequest,
            response: featurestore_service.ListFeaturestoresResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token updates do not mutate the caller's.
        self._request = featurestore_service.ListFeaturestoresRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are resolved against the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]:
        # Flatten the per-page ``featurestores`` lists into one async stream.
        async def _flatten():
            async for page in self.pages:
                for item in page.featurestores:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEntityTypesPager:
    """Synchronous pager for ``list_entity_types`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` and
    exposes an ``__iter__`` method over its ``entity_types`` field; when a
    page is exhausted, the next ``ListEntityTypes`` request is issued
    automatically.

    Attribute access is proxied to the most recent response, so all the
    usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., featurestore_service.ListEntityTypesResponse],
            request: featurestore_service.ListEntityTypesRequest,
            response: featurestore_service.ListEntityTypesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token updates do not mutate the caller's.
        self._request = featurestore_service.ListEntityTypesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are resolved against the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[entity_type.EntityType]:
        # Flatten the per-page ``entity_types`` lists into one stream.
        return (item for page in self.pages for item in page.entity_types)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListEntityTypesAsyncPager:
    """Asynchronous pager for ``list_entity_types`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` and
    exposes an ``__aiter__`` method over its ``entity_types`` field; when a
    page is exhausted, the next ``ListEntityTypes`` request is awaited
    automatically.

    Attribute access is proxied to the most recent response, so all the
    usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]],
            request: featurestore_service.ListEntityTypesRequest,
            response: featurestore_service.ListEntityTypesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token updates do not mutate the caller's.
        self._request = featurestore_service.ListEntityTypesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are resolved against the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[entity_type.EntityType]:
        # Flatten the per-page ``entity_types`` lists into one async stream.
        async def _flatten():
            async for page in self.pages:
                for item in page.entity_types:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEntityTypesAsyncPager:
    """A pager for iterating through ``list_entity_types`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` and
    exposes an ``__aiter__`` method that walks the ``entity_types`` field,
    transparently awaiting follow-up ``ListEntityTypes`` requests whenever
    the current response carries a ``next_page_token``.

    Attribute access is proxied to the most recently fetched response, so
    all the usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]],
            request: featurestore_service.ListEntityTypesRequest,
            response: featurestore_service.ListEntityTypesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Re-wrap the request so ``page_token`` can be mutated safely
        # without touching the caller's object.
        self._request = featurestore_service.ListEntityTypesRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]:
        """Asynchronously yield each page, awaiting new pages on demand."""
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[entity_type.EntityType]:
        async def _flatten():
            async for page in self.pages:
                for item in page.entity_types:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListFeaturesPager:
    """A pager for iterating through ``list_features`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` and
    exposes an ``__iter__`` method that walks the ``features`` field,
    transparently issuing follow-up ``ListFeatures`` requests whenever
    the current response carries a ``next_page_token``.

    Attribute access is proxied to the most recently fetched response, so
    all the usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., featurestore_service.ListFeaturesResponse],
            request: featurestore_service.ListFeaturesRequest,
            response: featurestore_service.ListFeaturesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListFeaturesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListFeaturesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Re-wrap the request so ``page_token`` can be mutated safely
        # without touching the caller's object.
        self._request = featurestore_service.ListFeaturesRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]:
        """Yield each page of results, fetching new pages on demand."""
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[feature.Feature]:
        return (item for page in self.pages for item in page.features)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListFeaturesAsyncPager:
    """A pager for iterating through ``list_features`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` and
    exposes an ``__aiter__`` method that walks the ``features`` field,
    transparently awaiting follow-up ``ListFeatures`` requests whenever
    the current response carries a ``next_page_token``.

    Attribute access is proxied to the most recently fetched response, so
    all the usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]],
            request: featurestore_service.ListFeaturesRequest,
            response: featurestore_service.ListFeaturesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListFeaturesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListFeaturesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Re-wrap the request so ``page_token`` can be mutated safely
        # without touching the caller's object.
        self._request = featurestore_service.ListFeaturesRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturesResponse]:
        """Asynchronously yield each page, awaiting new pages on demand."""
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[feature.Feature]:
        async def _flatten():
            async for page in self.pages:
                for item in page.features:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
+ """ + def __init__(self, + method: Callable[..., featurestore_service.SearchFeaturesResponse], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchFeaturesAsyncPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. 
class SearchFeaturesAsyncPager:
    """A pager for iterating through ``search_features`` requests.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` and
    exposes an ``__aiter__`` method that walks the ``features`` field,
    transparently awaiting follow-up ``SearchFeatures`` requests whenever
    the current response carries a ``next_page_token``.

    Attribute access is proxied to the most recently fetched response, so
    all the usual response attributes remain available on the pager itself.
    """
    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]],
            request: featurestore_service.SearchFeaturesRequest,
            response: featurestore_service.SearchFeaturesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Re-wrap the request so ``page_token`` can be mutated safely
        # without touching the caller's object.
        self._request = featurestore_service.SearchFeaturesRequest(request)
        self._response = response
        self._method = method
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]:
        """Asynchronously yield each page, awaiting new pages on demand."""
        yield self._response
        while True:
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[feature.Feature]:
        async def _flatten():
            async for page in self.pages:
                for item in page.features:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py new file mode 100644 index 0000000000..78cfc62d21 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreServiceTransport +from .grpc import FeaturestoreServiceGrpcTransport +from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
# Registry mapping a transport name ('grpc', 'grpc_asyncio') to the class
# implementing it; clients use this to resolve a transport by name.
_transport_registry: Dict[str, Type[FeaturestoreServiceTransport]] = OrderedDict(
    (
        ('grpc', FeaturestoreServiceGrpcTransport),
        ('grpc_asyncio', FeaturestoreServiceGrpcAsyncIOTransport),
    )
)

__all__ = (
    'FeaturestoreServiceTransport',
    'FeaturestoreServiceGrpcTransport',
    'FeaturestoreServiceGrpcAsyncIOTransport',
)
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import entity_type
from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1.types import feature
from google.cloud.aiplatform_v1.types import feature as gca_feature
from google.cloud.aiplatform_v1.types import featurestore
from google.cloud.aiplatform_v1.types import featurestore_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore

# Client info reported to the service in the user-agent string.  Falls back
# to an unversioned ClientInfo when the distribution metadata is unavailable
# (e.g. when running directly from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class FeaturestoreServiceTransport(abc.ABC):
    """Abstract transport class for FeaturestoreService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties
    below; this base class handles credential resolution and shared
    method-wrapping (retry/timeout/metadata) logic.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute wrapped (retry/timeout/metadata-aware) versions of
        every RPC method exposed by this transport.

        Every method uses the same wrapping policy (no default timeout),
        so a single comprehension replaces per-method boilerplate.
        """
        rpcs = (
            self.create_featurestore,
            self.get_featurestore,
            self.list_featurestores,
            self.update_featurestore,
            self.delete_featurestore,
            self.create_entity_type,
            self.get_entity_type,
            self.list_entity_types,
            self.update_entity_type,
            self.delete_entity_type,
            self.create_feature,
            self.batch_create_features,
            self.get_feature,
            self.list_features,
            self.update_feature,
            self.delete_feature,
            self.import_feature_values,
            self.batch_read_feature_values,
            self.export_feature_values,
            self.search_features,
        )
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in rpcs
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # ------------------------------------------------------------------
    # Abstract RPC stubs.  Each property returns the callable for one RPC;
    # sync transports return the response type directly, async transports
    # return an Awaitable of it (hence the Union return annotations).
    # ------------------------------------------------------------------

    @property
    def create_featurestore(self) -> Callable[
            [featurestore_service.CreateFeaturestoreRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get_featurestore(self) -> Callable[
            [featurestore_service.GetFeaturestoreRequest],
            Union[
                featurestore.Featurestore,
                Awaitable[featurestore.Featurestore]
            ]]:
        raise NotImplementedError()

    @property
    def list_featurestores(self) -> Callable[
            [featurestore_service.ListFeaturestoresRequest],
            Union[
                featurestore_service.ListFeaturestoresResponse,
                Awaitable[featurestore_service.ListFeaturestoresResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_featurestore(self) -> Callable[
            [featurestore_service.UpdateFeaturestoreRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def delete_featurestore(self) -> Callable[
            [featurestore_service.DeleteFeaturestoreRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def create_entity_type(self) -> Callable[
            [featurestore_service.CreateEntityTypeRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get_entity_type(self) -> Callable[
            [featurestore_service.GetEntityTypeRequest],
            Union[
                entity_type.EntityType,
                Awaitable[entity_type.EntityType]
            ]]:
        raise NotImplementedError()

    @property
    def list_entity_types(self) -> Callable[
            [featurestore_service.ListEntityTypesRequest],
            Union[
                featurestore_service.ListEntityTypesResponse,
                Awaitable[featurestore_service.ListEntityTypesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_entity_type(self) -> Callable[
            [featurestore_service.UpdateEntityTypeRequest],
            Union[
                gca_entity_type.EntityType,
                Awaitable[gca_entity_type.EntityType]
            ]]:
        raise NotImplementedError()

    @property
    def delete_entity_type(self) -> Callable[
            [featurestore_service.DeleteEntityTypeRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def create_feature(self) -> Callable[
            [featurestore_service.CreateFeatureRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def batch_create_features(self) -> Callable[
            [featurestore_service.BatchCreateFeaturesRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get_feature(self) -> Callable[
            [featurestore_service.GetFeatureRequest],
            Union[
                feature.Feature,
                Awaitable[feature.Feature]
            ]]:
        raise NotImplementedError()

    @property
    def list_features(self) -> Callable[
            [featurestore_service.ListFeaturesRequest],
            Union[
                featurestore_service.ListFeaturesResponse,
                Awaitable[featurestore_service.ListFeaturesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_feature(self) -> Callable[
            [featurestore_service.UpdateFeatureRequest],
            Union[
                gca_feature.Feature,
                Awaitable[gca_feature.Feature]
            ]]:
        raise NotImplementedError()

    @property
    def delete_feature(self) -> Callable[
            [featurestore_service.DeleteFeatureRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def import_feature_values(self) -> Callable[
            [featurestore_service.ImportFeatureValuesRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def batch_read_feature_values(self) -> Callable[
            [featurestore_service.BatchReadFeatureValuesRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def export_feature_values(self) -> Callable[
            [featurestore_service.ExportFeatureValuesRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def search_features(self) -> Callable[
            [featurestore_service.SearchFeaturesRequest],
            Union[
                featurestore_service.SearchFeaturesResponse,
                Awaitable[featurestore_service.SearchFeaturesResponse]
            ]]:
        raise NotImplementedError()

    # ------------------------------------------------------------------
    # Mixin RPCs: long-running operations, IAM, and location metadata.
    # ------------------------------------------------------------------

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'FeaturestoreServiceTransport',
)
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): + """gRPC backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the create featurestore method over gRPC. 
+ + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_featurestore' not in self._stubs: + self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore', + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_featurestore'] + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + featurestore.Featurestore]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + ~.Featurestore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_featurestore' not in self._stubs: + self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore', + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs['get_featurestore'] + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + featurestore_service.ListFeaturestoresResponse]: + r"""Return a callable for the list featurestores method over gRPC. 
+ + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + ~.ListFeaturestoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_featurestores' not in self._stubs: + self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores', + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs['list_featurestores'] + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_featurestore' not in self._stubs: + self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore', + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_featurestore'] + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_featurestore' not in self._stubs: + self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore', + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_featurestore'] + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + operations_pb2.Operation]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_entity_type' not in self._stubs: + self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType', + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_entity_type'] + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + entity_type.EntityType]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_entity_type' not in self._stubs: + self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType', + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs['get_entity_type'] + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + featurestore_service.ListEntityTypesResponse]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + ~.ListEntityTypesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + gca_entity_type.EntityType]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + feature.Feature]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + featurestore_service.ListFeaturesResponse]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + ~.ListFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + gca_feature.Feature]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'import_feature_values' not in self._stubs: + self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues', + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_feature_values'] + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_read_feature_values' not in self._stubs: + self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues', + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_read_feature_values'] + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. 
+ + Returns: + Callable[[~.ExportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + featurestore_service.SearchFeaturesResponse]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + ~.SearchFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'FeaturestoreServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..72d2cedf02 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py @@ -0,0 +1,1021 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreServiceGrpcTransport + + +class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_featurestore' not in self._stubs: + self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore', + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_featurestore'] + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Awaitable[featurestore.Featurestore]]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + Awaitable[~.Featurestore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_featurestore' not in self._stubs: + self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore', + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs['get_featurestore'] + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Awaitable[featurestore_service.ListFeaturestoresResponse]]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + Awaitable[~.ListFeaturestoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_featurestores' not in self._stubs: + self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores', + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs['list_featurestores'] + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_featurestore' not in self._stubs: + self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore', + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_featurestore'] + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. 
+ + Returns: + Callable[[~.DeleteFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_featurestore' not in self._stubs: + self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore', + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_featurestore'] + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_entity_type' not in self._stubs: + self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType', + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_entity_type'] + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + Awaitable[entity_type.EntityType]]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. 
+ + Returns: + Callable[[~.GetEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_entity_type' not in self._stubs: + self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType', + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs['get_entity_type'] + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Awaitable[featurestore_service.ListEntityTypesResponse]]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + Awaitable[~.ListEntityTypesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Awaitable[gca_entity_type.EntityType]]: + r"""Return a callable for the update entity type method over gRPC. 
+ + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + Awaitable[feature.Feature]]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Awaitable[featurestore_service.ListFeaturesResponse]]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + Awaitable[~.ListFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + Awaitable[gca_feature.Feature]]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'import_feature_values' not in self._stubs: + self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues', + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_feature_values'] + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_read_feature_values' not in self._stubs: + self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues', + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_read_feature_values'] + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export feature values method over gRPC. 
+ + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Awaitable[featurestore_service.SearchFeaturesResponse]]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + Awaitable[~.SearchFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'FeaturestoreServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py new file mode 100644 index 0000000000..3ae6bb8e71 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import IndexEndpointServiceClient +from .async_client import IndexEndpointServiceAsyncClient + +__all__ = ( + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py new file mode 100644 index 0000000000..713c7ef3e3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -0,0 +1,1793 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport +from .client import IndexEndpointServiceClient + + +class IndexEndpointServiceAsyncClient: + """A service for managing Vertex AI's IndexEndpoints.""" + + _client: IndexEndpointServiceClient + + DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT + 
DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexEndpointServiceClient.index_path) + parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. 
+ """ + return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return IndexEndpointServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = IndexEndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_index_endpoint(self, + request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (:class:`google.cloud.aiplatform_v1.types.IndexEndpoint`): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.IndexEndpoint` Indexes are deployed into it. 
An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.CreateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index_endpoint(self, + request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] + name (:class:`str`): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.GetIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_index_endpoints(self, + request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsAsyncPager: + r"""Lists IndexEndpoints in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the IndexEndpoints. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.ListIndexEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_index_endpoints, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index_endpoint(self, + request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (:class:`google.cloud.aiplatform_v1.types.IndexEndpoint`): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. 
The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_index_endpoint(self, + request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. + name (:class:`str`): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index_endpoint, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def deploy_index(self, + request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_deploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1.types.DeployedIndex`): + Required. The DeployedIndex to be + created within the IndexEndpoint. 
+ + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_index(self, + request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_undeploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (:class:`str`): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UndeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def mutate_deployed_index(self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1.types.DeployedIndex`): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.MutateDeployedIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_deployed_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py new file mode 100644 index 0000000000..679369333c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -0,0 +1,1996 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import IndexEndpointServiceGrpcTransport +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + 
+class IndexEndpointServiceClientMeta(type):
+ """Metaclass for the IndexEndpointService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]]
+ _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls,
+ label: str = None,
+ ) -> Type[IndexEndpointServiceTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta):
+ """A service for managing Vertex AI's IndexEndpoints."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client + instance. 
+ """
+ return self._transport
+
+ @staticmethod
+ def index_path(project: str,location: str,index: str,) -> str:
+ """Returns a fully-qualified index string."""
+ return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, )
+
+ @staticmethod
+ def parse_index_path(path: str) -> Dict[str,str]:
+ """Parses a index path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str:
+ """Returns a fully-qualified index_endpoint string."""
+ return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, )
+
+ @staticmethod
+ def parse_index_endpoint_path(path: str) -> Dict[str,str]:
+ """Parses a index_endpoint path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str, ) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str, ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder, )
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str,str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def
common_organization_path(organization: str, ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization, )
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str,str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str, ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(project=project, )
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str,str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str, ) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str,str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, IndexEndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexEndpointServiceTransport): + # transport is a IndexEndpointServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_index_endpoint(self, + request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. + parent (str): + Required. 
The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.CreateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): + request = index_endpoint_service.CreateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index_endpoint(self, + request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] + name (str): + Required. The name of the IndexEndpoint resource. 
+ Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.GetIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): + request = index_endpoint_service.GetIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_index_endpoints(self, + request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsPager: + r"""Lists IndexEndpoints in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.ListIndexEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): + request = index_endpoint_service.ListIndexEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_index_endpoint(self, + request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UpdateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_index_endpoint(self, + request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeleteIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def deploy_index(self, + request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_deploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be + created within the IndexEndpoint. + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeployIndexRequest): + request = index_endpoint_service.DeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def undeploy_index(self, + request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_undeploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (str): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UndeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UndeployIndexRequest): + request = index_endpoint_service.UndeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def mutate_deployed_index(self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.MutateDeployedIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): + request = index_endpoint_service.MutateDeployedIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response
+
+ def set_iam_policy(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Sets the IAM access control policy on the specified resource.
+
+ Replaces any existing policy.
+
+ Args:
+ request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
+ The request object. Request message for `SetIamPolicy`
+ method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ Returns:
+ ~.policy_pb2.Policy:
+ Defines an Identity and Access Management (IAM) policy.
+ It is used to specify access control policies for Cloud
+ Platform resources.
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members`` to a single
+ ``role``. Members can be user accounts, service
+ accounts, Google groups, and domains (such as G Suite).
+ A ``role`` is a named list of permissions (defined by
+ IAM or configured by users). A ``binding`` can
+ optionally specify a ``condition``, which is a logic
+ expression that further constrains the role binding
+ based on attributes about the request and/or target
+ resource.
+ **JSON Example**
+ ::
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+ **YAML Example**
+ ::
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._transport.set_iam_policy,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource", request.resource),)),
+ )
+
+ # Send the request.
+ response = rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for
+                `ListLocations` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                 if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.locations_pb2.ListLocationsResponse:
+                Response message for ``ListLocations`` method.
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = locations_pb2.ListLocationsRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._transport.list_locations,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py new file mode 100644 index 0000000000..7d43ed5fe1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service + + +class ListIndexEndpointsPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``index_endpoints`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]: + for page in self.pages: + yield from page.index_endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListIndexEndpointsAsyncPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``index_endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.index_endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..3b6ca0341f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexEndpointServiceTransport +from .grpc import IndexEndpointServiceGrpcTransport +from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] +_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexEndpointServiceTransport', + 'IndexEndpointServiceGrpcTransport', + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py new file mode 100644 index 0000000000..f9e2229342 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class IndexEndpointServiceTransport(abc.ABC): + """Abstract transport class for IndexEndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_index_endpoint: gapic_v1.method.wrap_method( + self.create_index_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.get_index_endpoint: gapic_v1.method.wrap_method( + self.get_index_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.list_index_endpoints: gapic_v1.method.wrap_method( + self.list_index_endpoints, + default_timeout=None, + client_info=client_info, + ), + self.update_index_endpoint: gapic_v1.method.wrap_method( + self.update_index_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.delete_index_endpoint: gapic_v1.method.wrap_method( + self.delete_index_endpoint, + default_timeout=None, + client_info=client_info, + ), + self.deploy_index: gapic_v1.method.wrap_method( + self.deploy_index, + default_timeout=None, + client_info=client_info, + ), + self.undeploy_index: gapic_v1.method.wrap_method( + self.undeploy_index, + default_timeout=None, + client_info=client_info, + ), + self.mutate_deployed_index: gapic_v1.method.wrap_method( + self.mutate_deployed_index, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + Union[ + index_endpoint.IndexEndpoint, + Awaitable[index_endpoint.IndexEndpoint] + ]]: + raise NotImplementedError() + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + Union[ + index_endpoint_service.ListIndexEndpointsResponse, + Awaitable[index_endpoint_service.ListIndexEndpointsResponse] + ]]: + raise NotImplementedError() + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + Union[ + gca_index_endpoint.IndexEndpoint, + Awaitable[gca_index_endpoint.IndexEndpoint] + ]]: + raise NotImplementedError() + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def mutate_deployed_index(self) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + 
]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, 
Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'IndexEndpointServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..a3460acf71 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -0,0 +1,679 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): + """gRPC backend transport for IndexEndpointService. + + A service for managing Vertex AI's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. 
+ + Returns: + Callable[[~.CreateIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + index_endpoint.IndexEndpoint]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + index_endpoint_service.ListIndexEndpointsResponse]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. 
+ + Returns: + Callable[[~.ListIndexEndpointsRequest], + ~.ListIndexEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + gca_index_endpoint.IndexEndpoint]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete index endpoint method over gRPC. 
+ + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy index method over gRPC. 
+ + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + @property + def mutate_deployed_index(self) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'mutate_deployed_index' not in self._stubs: + self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex', + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['mutate_deployed_index'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+        Sets the IAM access control policy on the specified
+        resource. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "set_iam_policy" not in self._stubs:
+            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+                "/google.iam.v1.IAMPolicy/SetIamPolicy",
+                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
+                response_deserializer=policy_pb2.Policy.FromString,
+            )
+        return self._stubs["set_iam_policy"]
+
+    @property
+    def get_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the get iam policy method over gRPC.
+        Gets the IAM access control policy for a resource.
+        Returns an empty policy if the resource exists and does
+        not have a policy set.
+        Returns:
+            Callable[[~.GetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_iam_policy" not in self._stubs:
+            self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+                "/google.iam.v1.IAMPolicy/GetIamPolicy",
+                request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
+                response_deserializer=policy_pb2.Policy.FromString,
+            )
+        return self._stubs["get_iam_policy"]
+
+    @property
+    def test_iam_permissions(
+        self,
+    ) -> Callable[
+        [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse
+    ]:
+        r"""Return a callable for the test iam permissions method over gRPC.
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'IndexEndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..c463d3fda4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,678 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexEndpointServiceGrpcTransport + + +class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): + """gRPC AsyncIO backend transport for IndexEndpointService. + + A service for managing Vertex AI's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. + + Returns: + Callable[[~.CreateIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + Awaitable[index_endpoint.IndexEndpoint]]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. + + Returns: + Callable[[~.ListIndexEndpointsRequest], + Awaitable[~.ListIndexEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + Awaitable[gca_index_endpoint.IndexEndpoint]]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. 
+ + Returns: + Callable[[~.UndeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + @property + def mutate_deployed_index(self) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_deployed_index' not in self._stubs: + self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex', + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['mutate_deployed_index'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py new file mode 100644 index 0000000000..f62272a024 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import IndexServiceClient +from .async_client import IndexServiceAsyncClient + +__all__ = ( + 'IndexServiceClient', + 'IndexServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py new file mode 100644 index 0000000000..f12adbcacc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -0,0 +1,1432 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.index_service import pagers +from google.cloud.aiplatform_v1.types import deployed_index_ref +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index as gca_index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport +from .client import IndexServiceClient + + +class IndexServiceAsyncClient: + """A service for creating and managing Vertex AI's Index + resources. 
+ """ + + _client: IndexServiceClient + + DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexServiceClient.index_path) + parse_index_path = staticmethod(IndexServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceAsyncClient: The constructed client. 
+ """ + return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceAsyncClient: The constructed client. + """ + return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return IndexServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> IndexServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, IndexServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = IndexServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_index(self, + request: Union[index_service.CreateIndexRequest, dict] = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Index in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (:class:`google.cloud.aiplatform_v1.types.Index`): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.CreateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index(self, + request: Union[index_service.GetIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] + name (:class:`str`): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.GetIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_indexes(self, + request: Union[index_service.ListIndexesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesAsyncPager: + r"""Lists Indexes in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_indexes(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.ListIndexesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_indexes, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index(self, + request: Union[index_service.UpdateIndexRequest, dict] = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an Index. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. + index (:class:`google.cloud.aiplatform_v1.types.Index`): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.UpdateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_index(self, + request: Union[index_service.DeleteIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] + had been undeployed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. + name (:class:`str`): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.DeleteIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py new file mode 100644 index 0000000000..01ffe7b186 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py @@ -0,0 +1,1635 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.index_service import pagers +from google.cloud.aiplatform_v1.types import deployed_index_ref +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index as gca_index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import IndexServiceGrpcTransport +from 
from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport


class IndexServiceClientMeta(type):
    """Metaclass for the IndexService client.

    Provides class-level methods for building and retrieving support
    objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[IndexServiceTransport]]
    _transport_registry["grpc"] = IndexServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[IndexServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label (Optional[str]): The name of the desired transport. If
                none is provided, then the first transport in the
                registry is used.

        Returns:
            The transport class to use.
        """
        # FIX: ``label`` defaults to None, so annotate it Optional
        # (implicit Optional per PEP 484 is deprecated).
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the
        # first one in the registry).
        return next(iter(cls._transport_registry.values()))


class IndexServiceClient(metaclass=IndexServiceClientMeta):
    """A service for creating and managing Vertex AI's Index
    resources.
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert an api endpoint to its mTLS counterpart.

        Converts "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com"
        respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # FIX: the named groups had been stripped from this pattern
        # ("(?P[^.]+)" is not valid regex syntax and would raise
        # re.error); restore the (?P<name>...) groups that the
        # m.groups() unpack below depends on.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            # Already an mTLS endpoint, or not a googleapis.com domain.
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Create an instance of this client from service account info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Create an instance of this client from a credentials file.

        Args:
            filename (str): The path to the service account private key
                json file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            IndexServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> IndexServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            IndexServiceTransport: The transport used by the client
                instance.
        """
        return self._transport

    @staticmethod
    def index_path(project: str, location: str, index: str, ) -> str:
        """Return a fully-qualified index string."""
        return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, )

    @staticmethod
    def parse_index_path(path: str) -> Dict[str, str]:
        """Parse an index path into its component segments."""
        # FIX: restore the stripped named groups -- "(?P.+?)" is invalid
        # regex syntax, and groupdict() requires named groups.
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def index_endpoint_path(project: str, location: str, index_endpoint: str, ) -> str:
        """Return a fully-qualified index_endpoint string."""
        return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, )

    @staticmethod
    def parse_index_endpoint_path(path: str) -> Dict[str, str]:
        """Parse an index_endpoint path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Return a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Return a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, IndexServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, IndexServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexServiceTransport): + # transport is a IndexServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_index(self, + request: Union[index_service.CreateIndexRequest, dict] = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. + parent (str): + Required. The resource name of the Location to create + the Index in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (google.cloud.aiplatform_v1.types.Index): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.CreateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.CreateIndexRequest): + request = index_service.CreateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index(self, + request: Union[index_service.GetIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.GetIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.GetIndexRequest): + request = index_service.GetIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_indexes(self, + request: Union[index_service.ListIndexesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesPager: + r"""Lists Indexes in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_indexes(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + parent (str): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.ListIndexesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.ListIndexesRequest): + request = index_service.ListIndexesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_indexes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_index(self, + request: Union[index_service.UpdateIndexRequest, dict] = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates an Index. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. + index (google.cloud.aiplatform_v1.types.Index): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.UpdateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.UpdateIndexRequest): + request = index_service.UpdateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_index(self, + request: Union[index_service.DeleteIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Index. 
An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] + had been undeployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.DeleteIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.DeleteIndexRequest): + request = index_service.DeleteIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py new file mode 100644 index 0000000000..9604823c15 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index_service + + +class ListIndexesPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., index_service.ListIndexesResponse], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[index.Index]: + for page in self.pages: + yield from page.indexes + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListIndexesAsyncPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``indexes`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[index_service.ListIndexesResponse]], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[index.Index]: + async def async_generator(): + async for page in self.pages: + for response in page.indexes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py new file mode 100644 index 0000000000..651b5193ef --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexServiceTransport +from .grpc import IndexServiceGrpcTransport +from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] +_transport_registry['grpc'] = IndexServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexServiceTransport', + 'IndexServiceGrpcTransport', + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py new file mode 100644 index 0000000000..1ce64f4373 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class IndexServiceTransport(abc.ABC): + """Abstract transport class for IndexService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_index: gapic_v1.method.wrap_method( + self.create_index, + default_timeout=None, + client_info=client_info, + ), + self.get_index: gapic_v1.method.wrap_method( + self.get_index, + default_timeout=None, + client_info=client_info, + ), + self.list_indexes: gapic_v1.method.wrap_method( + self.list_indexes, + default_timeout=None, + client_info=client_info, + ), + self.update_index: gapic_v1.method.wrap_method( + self.update_index, + default_timeout=None, + client_info=client_info, + ), + self.delete_index: gapic_v1.method.wrap_method( + self.delete_index, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + Union[ + index.Index, + Awaitable[index.Index] + ]]: + raise NotImplementedError() + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + Union[ + index_service.ListIndexesResponse, + Awaitable[index_service.ListIndexesResponse] + ]]: + raise NotImplementedError() + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + 
[operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'IndexServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py new file mode 100644 index 0000000000..9c165db789 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py @@ -0,0 +1,598 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexServiceGrpcTransport(IndexServiceTransport): + """gRPC backend transport for IndexService. + + A service for creating and managing Vertex AI's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. 
+ + Returns: + Callable[[~.CreateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index' not in self._stubs: + self._stubs['create_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/CreateIndex', + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index'] + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + index.Index]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. + + Returns: + Callable[[~.GetIndexRequest], + ~.Index]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index' not in self._stubs: + self._stubs['get_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/GetIndex', + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs['get_index'] + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + index_service.ListIndexesResponse]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + ~.ListIndexesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_indexes' not in self._stubs: + self._stubs['list_indexes'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/ListIndexes', + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs['list_indexes'] + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index' not in self._stubs: + self._stubs['update_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/UpdateIndex', + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_index'] + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_index' not in self._stubs: + self._stubs['delete_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/DeleteIndex', + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'IndexServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..807bdaf389 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py @@ -0,0 +1,597 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexServiceGrpcTransport + + +class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): + """gRPC AsyncIO backend transport for IndexService. + + A service for creating and managing Vertex AI's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. 
+ Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. + + Returns: + Callable[[~.CreateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index' not in self._stubs: + self._stubs['create_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/CreateIndex', + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index'] + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + Awaitable[index.Index]]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. + + Returns: + Callable[[~.GetIndexRequest], + Awaitable[~.Index]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_index' not in self._stubs: + self._stubs['get_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/GetIndex', + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs['get_index'] + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + Awaitable[index_service.ListIndexesResponse]]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + Awaitable[~.ListIndexesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_indexes' not in self._stubs: + self._stubs['list_indexes'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/ListIndexes', + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs['list_indexes'] + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_index' not in self._stubs: + self._stubs['update_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/UpdateIndex', + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_index'] + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index' not in self._stubs: + self._stubs['delete_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.IndexService/DeleteIndex', + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py new file mode 100644 index 0000000000..3a5ccd23d8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import JobServiceClient +from .async_client import JobServiceAsyncClient + +__all__ = ( + 'JobServiceClient', + 'JobServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py new file mode 100644 index 0000000000..217543cab4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -0,0 +1,3914 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from 
google.cloud.aiplatform_v1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_monitoring +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport +from .client import JobServiceClient + + +class JobServiceAsyncClient: + """A service for creating and managing Vertex AI's jobs.""" + + _client: JobServiceClient + + DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT + + batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) + parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) + custom_job_path = staticmethod(JobServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) + data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path) + parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) + 
dataset_path = staticmethod(JobServiceClient.dataset_path) + parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) + endpoint_path = staticmethod(JobServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) + hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) + parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) + model_path = staticmethod(JobServiceClient.model_path) + parse_model_path = staticmethod(JobServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) + parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) + tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) + trial_path = staticmethod(JobServiceClient.trial_path) + parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(JobServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) + common_project_path = staticmethod(JobServiceClient.common_project_path) + parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) + common_location_path = 
staticmethod(JobServiceClient.common_location_path)
+    parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path)
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobServiceAsyncClient: The constructed client.
+        """
+        return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobServiceAsyncClient: The constructed client.
+        """
+        return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return JobServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> JobServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            JobServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, JobServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the job service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.JobServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client.
It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = JobServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_custom_job(self, + request: Union[job_service.CreateCustomJobRequest, dict] = None, + *, + parent: str = None, + custom_job: gca_custom_job.CustomJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_custom_job.CustomJob: + r"""Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]): + The request object. Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + custom_job (:class:`google.cloud.aiplatform_v1.types.CustomJob`): + Required. The CustomJob to create. + This corresponds to the ``custom_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_custom_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_custom_job(self, + request: Union[job_service.GetCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_custom_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_custom_jobs(self, + request: Union[job_service.ListCustomJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsAsyncPager: + r"""Lists CustomJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]): + The request object. 
Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListCustomJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_custom_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCustomJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_custom_job(self, + request: Union[job_service.DeleteCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a CustomJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_custom_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_custom_job(self, + request: Union[job_service.CancelCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_custom_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob to cancel. 
Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_custom_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_data_labeling_job(self, + request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (:class:`google.cloud.aiplatform_v1.types.DataLabelingJob`): + Required. 
The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_data_labeling_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_data_labeling_job(self, + request: Union[job_service.GetDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_data_labeling_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_data_labeling_jobs(self, + request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsAsyncPager: + r"""Lists DataLabelingJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]): + The request object. 
Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListDataLabelingJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_labeling_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataLabelingJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_data_labeling_job(self, + request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_data_labeling_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_data_labeling_job(self, + request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_data_labeling_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_data_labeling_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_hyperparameter_tuning_job(self, + request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = await client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1.types.HyperparameterTuningJob`): + Required. The HyperparameterTuningJob + to create. 
+ + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_hyperparameter_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_hyperparameter_tuning_job(self, + request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_hyperparameter_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_hyperparameter_tuning_jobs(self, + request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsAsyncPager: + r"""Lists HyperparameterTuningJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListHyperparameterTuningJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hyperparameter_tuning_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHyperparameterTuningJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_hyperparameter_tuning_job(self, + request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a HyperparameterTuningJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_hyperparameter_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_hyperparameter_tuning_job(self, + request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_hyperparameter_tuning_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_hyperparameter_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_batch_prediction_job(self, + request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, + *, + parent: str = None, + batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_batch_prediction_job.BatchPredictionJob: + r"""Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + batch_prediction_job (:class:`google.cloud.aiplatform_v1.types.BatchPredictionJob`): + Required. The BatchPredictionJob to + create. + + This corresponds to the ``batch_prediction_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, batch_prediction_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if batch_prediction_job is not None: + request.batch_prediction_job = batch_prediction_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_batch_prediction_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_batch_prediction_job(self, + request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> batch_prediction_job.BatchPredictionJob: + r"""Gets a BatchPredictionJob + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. 
+ If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_batch_prediction_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_batch_prediction_jobs(self, + request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsAsyncPager: + r"""Lists BatchPredictionJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListBatchPredictionJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_batch_prediction_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBatchPredictionJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_batch_prediction_job(self, + request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_batch_prediction_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_batch_prediction_job(self, + request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. The + server makes the best effort to cancel the job, but success is + not guaranteed. 
Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_batch_prediction_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
        # Disallow combining the `request` object with the flattened `name`
        # argument: exactly one style may be used per call.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.CancelBatchPredictionJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.cancel_batch_prediction_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the target resource).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request. Cancel returns no payload, so nothing is returned.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def create_model_deployment_monitoring_job(self,
            request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            parent: str = None,
            model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
        r"""Creates a ModelDeploymentMonitoringJob. It will run
        periodically on a configured interval.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_create_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
                model_deployment_monitoring_job.display_name = "display_name_value"
                model_deployment_monitoring_job.endpoint = "endpoint_value"

                request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest(
                    parent="parent_value",
                    model_deployment_monitoring_job=model_deployment_monitoring_job,
                )

                # Make the request
                response = await client.create_model_deployment_monitoring_job(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]):
                The request object. Request message for
                [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob].
            parent (:class:`str`):
                Required. The parent of the
                ModelDeploymentMonitoringJob. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob`):
                Required. The
                ModelDeploymentMonitoringJob to create

                This corresponds to the ``model_deployment_monitoring_job`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob:
                Represents a job that runs
                periodically to monitor the deployed
                models in an endpoint. It will analyze
                the logged training & prediction data to
                detect any abnormal behaviors.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, model_deployment_monitoring_job])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.CreateModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if model_deployment_monitoring_job is not None:
            request.model_deployment_monitoring_job = model_deployment_monitoring_job

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the parent location).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def search_model_deployment_monitoring_stats_anomalies(self,
            request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None,
            *,
            model_deployment_monitoring_job: str = None,
            deployed_model_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
        r"""Searches Model Monitoring Statistics generated within
        a given time window.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_search_model_deployment_monitoring_stats_anomalies():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
                    model_deployment_monitoring_job="model_deployment_monitoring_job_value",
                    deployed_model_id="deployed_model_id_value",
                )

                # Make the request
                page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)

                # Handle the response
                async for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]):
                The request object. Request message for
                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
            model_deployment_monitoring_job (:class:`str`):
                Required. ModelDeploymentMonitoring Job resource name.
                Format:
                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``

                This corresponds to the ``model_deployment_monitoring_job`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deployed_model_id (:class:`str`):
                Required. The DeployedModel ID of the
                [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].

                This corresponds to the ``deployed_model_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
                Response message for
                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if model_deployment_monitoring_job is not None:
            request.model_deployment_monitoring_job = model_deployment_monitoring_job
        if deployed_model_id is not None:
            request.deployed_model_id = deployed_model_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.search_model_deployment_monitoring_stats_anomalies,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the monitoring job).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("model_deployment_monitoring_job", request.model_deployment_monitoring_job),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_model_deployment_monitoring_job(self,
            request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
        r"""Gets a ModelDeploymentMonitoringJob.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_get_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest(
                    name="name_value",
                )

                # Make the request
                response = await client.get_model_deployment_monitoring_job(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]):
                The request object.
                Request message for
                [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob].
            name (:class:`str`):
                Required. The resource name of the
                ModelDeploymentMonitoringJob. Format:
                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob:
                Represents a job that runs
                periodically to monitor the deployed
                models in an endpoint. It will analyze
                the logged training & prediction data to
                detect any abnormal behaviors.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.GetModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the job resource).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_model_deployment_monitoring_jobs(self,
            request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager:
        r"""Lists ModelDeploymentMonitoringJobs in a Location.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_list_model_deployment_monitoring_jobs():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_model_deployment_monitoring_jobs(request=request)

                # Handle the response
                async for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]):
                The request object. Request message for
                [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs].
            parent (:class:`str`):
                Required. The parent of the
                ModelDeploymentMonitoringJob. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager:
                Response message for
                [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.ListModelDeploymentMonitoringJobsRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_model_deployment_monitoring_jobs,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the parent location).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListModelDeploymentMonitoringJobsAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def update_model_deployment_monitoring_job(self,
            request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None,
            update_mask: field_mask_pb2.FieldMask = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Updates a ModelDeploymentMonitoringJob.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_update_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
                model_deployment_monitoring_job.display_name = "display_name_value"
                model_deployment_monitoring_job.endpoint = "endpoint_value"

                request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
                    model_deployment_monitoring_job=model_deployment_monitoring_job,
                )

                # Make the request
                operation = client.update_model_deployment_monitoring_job(request=request)

                print("Waiting for operation to complete...")

                response = await operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]):
                The request object. Request message for
                [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob].
            model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob`):
                Required. The model monitoring
                configuration which replaces the
                resource on the server.

                This corresponds to the ``model_deployment_monitoring_job`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                Required. The update mask is used to specify the fields
                to be overwritten in the ModelDeploymentMonitoringJob
                resource by the update. The fields specified in the
                update_mask are relative to the resource, not the full
                request. A field will be overwritten if it is in the
                mask. If the user does not provide a mask then only the
                non-empty fields present in the request will be
                overwritten. Set the update_mask to ``*`` to override
                all fields. For the objective config, the user can
                either provide the update mask for
                model_deployment_monitoring_objective_configs or any
                combination of its nested fields, such as:
                model_deployment_monitoring_objective_configs.objective_config.training_dataset.

                Updatable fields:

                - ``display_name``
                - ``model_deployment_monitoring_schedule_config``
                - ``model_monitoring_alert_config``
                - ``logging_sampling_strategy``
                - ``labels``
                - ``log_ttl``
                - ``enable_monitoring_pipeline_logs`` . and
                - ``model_deployment_monitoring_objective_configs`` .
                  or
                - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset``
                - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config``
                - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config``

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an
                endpoint. It will analyze the logged training &
                prediction data to detect any abnormal behaviors.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([model_deployment_monitoring_job, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.UpdateModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if model_deployment_monitoring_job is not None:
            request.model_deployment_monitoring_job = model_deployment_monitoring_job
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header built from the nested resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        # The LRO resolves to the updated ModelDeploymentMonitoringJob.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob,
            metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata,
        )

        # Done; return the response.
        return response

    async def delete_model_deployment_monitoring_job(self,
            request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Deletes a ModelDeploymentMonitoringJob.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_delete_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
                    name="name_value",
                )

                # Make the request
                operation = client.delete_model_deployment_monitoring_job(request=request)

                print("Waiting for operation to complete...")

                response = await operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]):
                The request object. Request message for
                [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob].
            name (:class:`str`):
                Required. The resource name of the model monitoring job
                to delete. Format:
                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:

                service Foo {
                    rpc Bar(google.protobuf.Empty) returns
                    (google.protobuf.Empty);

                }

                The JSON representation for Empty is empty JSON
                object {}.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.DeleteModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the job resource).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        # The LRO has no payload (Empty); only deletion metadata is exposed.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response

    async def pause_model_deployment_monitoring_job(self,
            request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> None:
        r"""Pauses a ModelDeploymentMonitoringJob. If the job is running,
        the server makes a best effort to cancel the job. Will mark
        [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
        to 'PAUSED'.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_pause_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest(
                    name="name_value",
                )

                # Make the request
                await client.pause_model_deployment_monitoring_job(request=request)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]):
                The request object. Request message for
                [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob].
            name (:class:`str`):
                Required. The resource name of the
                ModelDeploymentMonitoringJob to pause. Format:
                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.PauseModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.pause_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the job resource).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request. Pause returns no payload.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def resume_model_deployment_monitoring_job(self,
            request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> None:
        r"""Resumes a paused ModelDeploymentMonitoringJob. It
        will start to run from next scheduled time. A deleted
        ModelDeploymentMonitoringJob can't be resumed.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            async def sample_resume_model_deployment_monitoring_job():
                # Create a client
                client = aiplatform_v1.JobServiceAsyncClient()

                # Initialize request argument(s)
                request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest(
                    name="name_value",
                )

                # Make the request
                await client.resume_model_deployment_monitoring_job(request=request)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]):
                The request object. Request message for
                [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob].
            name (:class:`str`):
                Required. The resource name of the
                ModelDeploymentMonitoringJob to resume. Format:
                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = job_service.ResumeModelDeploymentMonitoringJobRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.resume_model_deployment_monitoring_job,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header identifying the job resource).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request. Resume returns no payload.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def list_operations(
        self,
        request: operations_pb2.ListOperationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.

        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for
                `ListOperations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): this operations mixin uses gapic_v1.method.wrap_method,
        # unlike the service RPCs above which use method_async — confirm intended.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.list_operations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        # Routing header identifying the operation's resource name.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def get_operation(
        self,
        request: operations_pb2.GetOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.

        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for
                `GetOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.get_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header for the operation's resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def delete_operation(
        self,
        request: operations_pb2.DeleteOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes a long-running operation.

        This method indicates that the client is no longer interested
        in the operation result. It does not cancel the operation.
        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.DeleteOperationRequest`):
                The request object. Request message for
                `DeleteOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.delete_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header for the operation's resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request. Delete returns no payload.
        await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    async def cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for
                `CancelOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.cancel_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header for the operation's resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py new file mode 100644 index 0000000000..5f403771f9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py @@ -0,0 +1,4198 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_service +from 
google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_monitoring +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import JobServiceGrpcTransport +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport + + +class JobServiceClientMeta(type): + """Metaclass for the JobService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] + _transport_registry["grpc"] = JobServiceGrpcTransport + _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[JobServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class JobServiceClient(metaclass=JobServiceClientMeta): + """A service for creating and managing Vertex AI's jobs.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobServiceTransport: + """Returns the transport used by the client instance. + + Returns: + JobServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: + """Returns a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + + @staticmethod + def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: + """Parses a batch_prediction_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: + """Returns a fully-qualified data_labeling_job string.""" + return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + + @staticmethod + def parse_data_labeling_job_path(path: str) -> Dict[str,str]: + """Parses a data_labeling_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return 
"projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: + """Returns a fully-qualified hyperparameter_tuning_job string.""" + return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + + @staticmethod + def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: + """Parses a hyperparameter_tuning_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: + """Returns a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: + """Parses a model_deployment_monitoring_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: + """Returns a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str,str]: + """Parses a tensorboard path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def trial_path(project: str,location: str,study: str,trial: str,) -> str: + """Returns a fully-qualified trial 
string.""" + return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + + @staticmethod + def parse_trial_path(path: str) -> Dict[str,str]: + """Parses a trial path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its 
component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, JobServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, JobServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobServiceTransport): + # transport is a JobServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_custom_job(self, + request: Union[job_service.CreateCustomJobRequest, dict] = None, + *, + parent: str = None, + custom_job: gca_custom_job.CustomJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_custom_job.CustomJob: + r"""Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]): + The request object. Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. + parent (str): + Required. The resource name of the Location to create + the CustomJob in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + custom_job (google.cloud.aiplatform_v1.types.CustomJob): + Required. The CustomJob to create. + This corresponds to the ``custom_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateCustomJobRequest): + request = job_service.CreateCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_custom_job(self, + request: Union[job_service.GetCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetCustomJobRequest): + request = job_service.GetCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_custom_jobs(self, + request: Union[job_service.ListCustomJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsPager: + r"""Lists CustomJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListCustomJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListCustomJobsRequest): + request = job_service.ListCustomJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListCustomJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_custom_job(self, + request: Union[job_service.DeleteCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a CustomJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + name (str): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteCustomJobRequest): + request = job_service.DeleteCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_custom_job(self, + request: Union[job_service.CancelCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. 
Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_custom_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelCustomJobRequest): + request = job_service.CancelCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_data_labeling_job(self, + request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateDataLabelingJobRequest): + request = job_service.CreateDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_data_labeling_job(self, + request: Union[job_service.GetDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetDataLabelingJobRequest): + request = job_service.GetDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_data_labeling_jobs(self, + request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsPager: + r"""Lists DataLabelingJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListDataLabelingJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListDataLabelingJobsRequest): + request = job_service.ListDataLabelingJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataLabelingJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_data_labeling_job(self, + request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a DataLabelingJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteDataLabelingJobRequest): + request = job_service.DeleteDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_data_labeling_job(self, + request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_data_labeling_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelDataLabelingJobRequest): + request = job_service.CancelDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_hyperparameter_tuning_job(self, + request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + parent (str): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob + to create. 
+ + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): + request = job_service.CreateHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_hyperparameter_tuning_job(self, + request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): + request = job_service.GetHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_hyperparameter_tuning_jobs(self, + request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsPager: + r"""Lists HyperparameterTuningJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListHyperparameterTuningJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): + request = job_service.ListHyperparameterTuningJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListHyperparameterTuningJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_hyperparameter_tuning_job(self, + request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a HyperparameterTuningJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): + request = job_service.DeleteHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_hyperparameter_tuning_job(self, + request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_hyperparameter_tuning_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest): + request = job_service.CancelHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_batch_prediction_job(self, + request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, + *, + parent: str = None, + batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_batch_prediction_job.BatchPredictionJob: + r"""Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. + parent (str): + Required. The resource name of the Location to create + the BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): + Required. The BatchPredictionJob to + create. + + This corresponds to the ``batch_prediction_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, batch_prediction_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateBatchPredictionJobRequest): + request = job_service.CreateBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if batch_prediction_job is not None: + request.batch_prediction_job = batch_prediction_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_batch_prediction_job(self, + request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> batch_prediction_job.BatchPredictionJob: + r"""Gets a BatchPredictionJob + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. 
+ If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetBatchPredictionJobRequest): + request = job_service.GetBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_batch_prediction_jobs(self, + request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsPager: + r"""Lists BatchPredictionJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListBatchPredictionJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListBatchPredictionJobsRequest): + request = job_service.ListBatchPredictionJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBatchPredictionJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_batch_prediction_job(self, + request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): + request = job_service.DeleteBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_batch_prediction_job(self, + request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. 
The + server makes the best effort to cancel the job, but success is + not guaranteed. Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_batch_prediction_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelBatchPredictionJobRequest): + request = job_service.CancelBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_model_deployment_monitoring_job(self, + request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def search_model_deployment_monitoring_stats_anomalies(self, + request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, + *, + model_deployment_monitoring_job: str = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + r"""Searches Model Monitoring Statistics generated within + a given time window. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): + The request object. Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + Required. The DeployedModel ID of the + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
+ + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_deployment_monitoring_job(self, + request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_deployment_monitoring_jobs(self, + request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListModelDeploymentMonitoringJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_model_deployment_monitoring_job(self, + request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask is used to specify the fields + to be overwritten in the ModelDeploymentMonitoringJob + resource by the update. The fields specified in the + update_mask are relative to the resource, not the full + request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override + all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . + or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.UpdateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_deployment_monitoring_job(self, + request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def pause_model_deployment_monitoring_job(self, + request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.pause_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. 
Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.PauseModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest): + request = job_service.PauseModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def resume_model_deployment_monitoring_job(self, + request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.resume_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ResumeModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest): + request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
# Resolve the installed package version for the x-goog-api-client header.
# pkg_resources is deprecated (and slow to import); importlib.metadata is
# the supported stdlib replacement (Python 3.8+).
from importlib import metadata as _importlib_metadata

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=_importlib_metadata.version("google-cloud-aiplatform"),
    )
except _importlib_metadata.PackageNotFoundError:
    # Package not installed (e.g. running from a source checkout);
    # fall back to an unversioned ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "JobServiceClient",
)
class ListCustomJobsPager:
    """Iterates over every result of a ``list_custom_jobs`` call.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` and,
    via ``__iter__``, walks the ``custom_jobs`` field across all pages,
    transparently issuing further ``ListCustomJobs`` requests whenever a
    ``next_page_token`` is present.

    Attribute access is forwarded to the most recently fetched response,
    so its fields remain available directly on the pager.
    """
    def __init__(self,
            method: Callable[..., job_service.ListCustomJobsResponse],
            request: job_service.ListCustomJobsRequest,
            response: job_service.ListCustomJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The bound RPC that produced ``response`` and
                is re-invoked for subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a fresh request proto so page_token can be mutated freely.
        self._request = job_service.ListCustomJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[job_service.ListCustomJobsResponse]:
        # Yield the page in hand, then keep calling the RPC while the
        # service reports another page token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[custom_job.CustomJob]:
        return (item for page in self.pages for item in page.custom_jobs)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class ListCustomJobsAsyncPager:
    """Asynchronously iterates over every result of ``list_custom_jobs``.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` and,
    via ``__aiter__``, walks the ``custom_jobs`` field across all pages,
    awaiting further ``ListCustomJobs`` requests whenever a
    ``next_page_token`` is present.

    Attribute access is forwarded to the most recently fetched response,
    so its fields remain available directly on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]],
            request: job_service.ListCustomJobsRequest,
            response: job_service.ListCustomJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The bound async RPC that produced ``response``
                and is re-invoked for subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a fresh request proto so page_token can be mutated freely.
        self._request = job_service.ListCustomJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]:
        # Yield the page in hand, then keep awaiting the RPC while the
        # service reports another page token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]:
        async def _flatten():
            async for page in self.pages:
                for item in page.custom_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListDataLabelingJobsPager:
    """Iterates over every result of a ``list_data_labeling_jobs`` call.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
    and, via ``__iter__``, walks the ``data_labeling_jobs`` field across all
    pages, transparently issuing further ``ListDataLabelingJobs`` requests
    whenever a ``next_page_token`` is present.

    Attribute access is forwarded to the most recently fetched response,
    so its fields remain available directly on the pager.
    """
    def __init__(self,
            method: Callable[..., job_service.ListDataLabelingJobsResponse],
            request: job_service.ListDataLabelingJobsRequest,
            response: job_service.ListDataLabelingJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The bound RPC that produced ``response`` and
                is re-invoked for subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a fresh request proto so page_token can be mutated freely.
        self._request = job_service.ListDataLabelingJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]:
        # Yield the page in hand, then keep calling the RPC while the
        # service reports another page token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]:
        return (item for page in self.pages for item in page.data_labeling_jobs)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class ListDataLabelingJobsAsyncPager:
    """Asynchronously iterates over every result of ``list_data_labeling_jobs``.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
    and, via ``__aiter__``, walks the ``data_labeling_jobs`` field across all
    pages, awaiting further ``ListDataLabelingJobs`` requests whenever a
    ``next_page_token`` is present.

    Attribute access is forwarded to the most recently fetched response,
    so its fields remain available directly on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]],
            request: job_service.ListDataLabelingJobsRequest,
            response: job_service.ListDataLabelingJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The bound async RPC that produced ``response``
                and is re-invoked for subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a fresh request proto so page_token can be mutated freely.
        self._request = job_service.ListDataLabelingJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]:
        # Yield the page in hand, then keep awaiting the RPC while the
        # service reports another page token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]:
        async def _flatten():
            async for page in self.pages:
                for item in page.data_labeling_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
+ """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: + async def async_generator(): + async for page in self.pages: + for response in page.data_labeling_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: + for page in self.pages: + yield from page.hyperparameter_tuning_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsAsyncPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: + async def async_generator(): + async for page in self.pages: + for response in page.hyperparameter_tuning_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListBatchPredictionJobsResponse], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: + for page in self.pages: + yield from page.batch_prediction_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsAsyncPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``batch_prediction_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: + async def async_generator(): + async for page in self.pages: + for response in page.batch_prediction_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + for page in self.pages: + yield from page.monitoring_stats + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + async def async_generator(): + async for page in self.pages: + for response in page.monitoring_stats: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
class ListModelDeploymentMonitoringJobsAsyncPager:
    """Async pager over ``list_model_deployment_monitoring_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse`
    and exposes ``__aiter__`` so callers can asynchronously iterate every
    ``model_deployment_monitoring_jobs`` entry across all pages.  Follow-up
    ``ListModelDeploymentMonitoringJobs`` requests are issued lazily whenever
    the current response carries a ``next_page_token``.  Attribute access on
    the pager is proxied to the most recently fetched response, so all the
    usual response attributes remain available.
    """
    def __init__(self,
            method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]],
            request: job_service.ListModelDeploymentMonitoringJobsRequest,
            response: job_service.ListModelDeploymentMonitoringJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API call that produced ``response``;
                awaited again for each subsequent page.
            request (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so advancing ``page_token`` below never mutates
        # the caller's request instance.
        self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]:
        """Asynchronously yield each full response page, fetching on demand."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]:
        async def _flatten():
            async for page in self.pages:
                for job in page.model_deployment_monitoring_jobs:
                    yield job

        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
from collections import OrderedDict
from typing import Dict, Type

from .base import JobServiceTransport
from .grpc import JobServiceGrpcTransport
from .grpc_asyncio import JobServiceGrpcAsyncIOTransport


# Registry mapping transport names (as accepted by the client's ``transport``
# argument) to their concrete implementations.  Insertion order is meaningful:
# the first entry is the default transport.
_transport_registry: Dict[str, Type[JobServiceTransport]] = OrderedDict(
    (
        ('grpc', JobServiceGrpcTransport),
        ('grpc_asyncio', JobServiceGrpcAsyncIOTransport),
    )
)

__all__ = (
    'JobServiceTransport',
    'JobServiceGrpcTransport',
    'JobServiceGrpcAsyncIOTransport',
)
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job
from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore  # NOTE: was imported twice; deduplicated
from google.protobuf import empty_pb2  # type: ignore

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Running from a source checkout (no installed distribution): fall back
    # to an unversioned user-agent.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class JobServiceTransport(abc.ABC):
    """Abstract transport class for JobService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties
    below; this base class handles credential resolution and per-method
    wrapping (retry/timeout/user-agent) shared by every transport.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the wrapped methods.

        Every RPC uses the same wrapping (no default timeout, shared
        ``client_info``), so build the mapping in one pass instead of
        repeating an identical ``wrap_method`` call per RPC.  Accessing each
        property here triggers stub creation in the concrete transport, so
        this must run after the channel exists.
        """
        rpcs = (
            self.create_custom_job,
            self.get_custom_job,
            self.list_custom_jobs,
            self.delete_custom_job,
            self.cancel_custom_job,
            self.create_data_labeling_job,
            self.get_data_labeling_job,
            self.list_data_labeling_jobs,
            self.delete_data_labeling_job,
            self.cancel_data_labeling_job,
            self.create_hyperparameter_tuning_job,
            self.get_hyperparameter_tuning_job,
            self.list_hyperparameter_tuning_jobs,
            self.delete_hyperparameter_tuning_job,
            self.cancel_hyperparameter_tuning_job,
            self.create_batch_prediction_job,
            self.get_batch_prediction_job,
            self.list_batch_prediction_jobs,
            self.delete_batch_prediction_job,
            self.cancel_batch_prediction_job,
            self.create_model_deployment_monitoring_job,
            self.search_model_deployment_monitoring_stats_anomalies,
            self.get_model_deployment_monitoring_job,
            self.list_model_deployment_monitoring_jobs,
            self.update_model_deployment_monitoring_job,
            self.delete_model_deployment_monitoring_job,
            self.pause_model_deployment_monitoring_job,
            self.resume_model_deployment_monitoring_job,
        )
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in rpcs
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # -- CustomJob RPCs -----------------------------------------------------

    @property
    def create_custom_job(self) -> Callable[
            [job_service.CreateCustomJobRequest],
            Union[
                gca_custom_job.CustomJob,
                Awaitable[gca_custom_job.CustomJob]
            ]]:
        raise NotImplementedError()

    @property
    def get_custom_job(self) -> Callable[
            [job_service.GetCustomJobRequest],
            Union[
                custom_job.CustomJob,
                Awaitable[custom_job.CustomJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_custom_jobs(self) -> Callable[
            [job_service.ListCustomJobsRequest],
            Union[
                job_service.ListCustomJobsResponse,
                Awaitable[job_service.ListCustomJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_custom_job(self) -> Callable[
            [job_service.DeleteCustomJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_custom_job(self) -> Callable[
            [job_service.CancelCustomJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    # -- DataLabelingJob RPCs -----------------------------------------------

    @property
    def create_data_labeling_job(self) -> Callable[
            [job_service.CreateDataLabelingJobRequest],
            Union[
                gca_data_labeling_job.DataLabelingJob,
                Awaitable[gca_data_labeling_job.DataLabelingJob]
            ]]:
        raise NotImplementedError()

    @property
    def get_data_labeling_job(self) -> Callable[
            [job_service.GetDataLabelingJobRequest],
            Union[
                data_labeling_job.DataLabelingJob,
                Awaitable[data_labeling_job.DataLabelingJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_data_labeling_jobs(self) -> Callable[
            [job_service.ListDataLabelingJobsRequest],
            Union[
                job_service.ListDataLabelingJobsResponse,
                Awaitable[job_service.ListDataLabelingJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_data_labeling_job(self) -> Callable[
            [job_service.DeleteDataLabelingJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_data_labeling_job(self) -> Callable[
            [job_service.CancelDataLabelingJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    # -- HyperparameterTuningJob RPCs ---------------------------------------

    @property
    def create_hyperparameter_tuning_job(self) -> Callable[
            [job_service.CreateHyperparameterTuningJobRequest],
            Union[
                gca_hyperparameter_tuning_job.HyperparameterTuningJob,
                Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]
            ]]:
        raise NotImplementedError()

    @property
    def get_hyperparameter_tuning_job(self) -> Callable[
            [job_service.GetHyperparameterTuningJobRequest],
            Union[
                hyperparameter_tuning_job.HyperparameterTuningJob,
                Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_hyperparameter_tuning_jobs(self) -> Callable[
            [job_service.ListHyperparameterTuningJobsRequest],
            Union[
                job_service.ListHyperparameterTuningJobsResponse,
                Awaitable[job_service.ListHyperparameterTuningJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_hyperparameter_tuning_job(self) -> Callable[
            [job_service.DeleteHyperparameterTuningJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_hyperparameter_tuning_job(self) -> Callable[
            [job_service.CancelHyperparameterTuningJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    # -- BatchPredictionJob RPCs --------------------------------------------

    @property
    def create_batch_prediction_job(self) -> Callable[
            [job_service.CreateBatchPredictionJobRequest],
            Union[
                gca_batch_prediction_job.BatchPredictionJob,
                Awaitable[gca_batch_prediction_job.BatchPredictionJob]
            ]]:
        raise NotImplementedError()

    @property
    def get_batch_prediction_job(self) -> Callable[
            [job_service.GetBatchPredictionJobRequest],
            Union[
                batch_prediction_job.BatchPredictionJob,
                Awaitable[batch_prediction_job.BatchPredictionJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_batch_prediction_jobs(self) -> Callable[
            [job_service.ListBatchPredictionJobsRequest],
            Union[
                job_service.ListBatchPredictionJobsResponse,
                Awaitable[job_service.ListBatchPredictionJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_batch_prediction_job(self) -> Callable[
            [job_service.DeleteBatchPredictionJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_batch_prediction_job(self) -> Callable[
            [job_service.CancelBatchPredictionJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    # -- ModelDeploymentMonitoringJob RPCs ----------------------------------

    @property
    def create_model_deployment_monitoring_job(self) -> Callable[
            [job_service.CreateModelDeploymentMonitoringJobRequest],
            Union[
                gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob,
                Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]
            ]]:
        raise NotImplementedError()

    @property
    def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[
            [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest],
            Union[
                job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse,
                Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def get_model_deployment_monitoring_job(self) -> Callable[
            [job_service.GetModelDeploymentMonitoringJobRequest],
            Union[
                model_deployment_monitoring_job.ModelDeploymentMonitoringJob,
                Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_model_deployment_monitoring_jobs(self) -> Callable[
            [job_service.ListModelDeploymentMonitoringJobsRequest],
            Union[
                job_service.ListModelDeploymentMonitoringJobsResponse,
                Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_model_deployment_monitoring_job(self) -> Callable[
            [job_service.UpdateModelDeploymentMonitoringJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def delete_model_deployment_monitoring_job(self) -> Callable[
            [job_service.DeleteModelDeploymentMonitoringJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def pause_model_deployment_monitoring_job(self) -> Callable[
            [job_service.PauseModelDeploymentMonitoringJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def resume_model_deployment_monitoring_job(self) -> Callable[
            [job_service.ResumeModelDeploymentMonitoringJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    # -- Mixin RPCs: operations, IAM, locations -----------------------------

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'JobServiceTransport',
)
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO + + +class JobServiceGrpcTransport(JobServiceTransport): + """gRPC backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + gca_custom_job.CustomJob]: + r"""Return a callable for the create custom job method over gRPC. + + Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateCustomJobRequest], + ~.CustomJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_custom_job' not in self._stubs: + self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', + request_serializer=job_service.CreateCustomJobRequest.serialize, + response_deserializer=gca_custom_job.CustomJob.deserialize, + ) + return self._stubs['create_custom_job'] + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + custom_job.CustomJob]: + r"""Return a callable for the get custom job method over gRPC. + + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + ~.CustomJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + job_service.ListCustomJobsResponse]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + ~.ListCustomJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + gca_data_labeling_job.DataLabelingJob]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + data_labeling_job.DataLabelingJob]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + job_service.ListDataLabelingJobsResponse]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + ~.ListDataLabelingJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + gca_hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. + + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + job_service.ListHyperparameterTuningJobsResponse]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + ~.ListHyperparameterTuningJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelHyperparameterTuningJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_hyperparameter_tuning_job' not in self._stubs: + self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_hyperparameter_tuning_job'] + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + gca_batch_prediction_job.BatchPredictionJob]: + r"""Return a callable for the create batch prediction job method over gRPC. + + Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + Returns: + Callable[[~.CreateBatchPredictionJobRequest], + ~.BatchPredictionJob]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + batch_prediction_job.BatchPredictionJob]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + ~.BatchPredictionJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + job_service.ListBatchPredictionJobsResponse]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. 
+ + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + ~.ListBatchPredictionJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_batch_prediction_job' not in self._stubs: + self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob', + request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_batch_prediction_job'] + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel batch prediction job method over gRPC. + + Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. The + server makes the best effort to cancel the job, but success is + not guaranteed. Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + Returns: + Callable[[~.CancelBatchPredictionJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. 
+ + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + job_service.ListModelDeploymentMonitoringJobsResponse]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + ~.ListModelDeploymentMonitoringJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. + + Updates a ModelDeploymentMonitoringJob. 
+ + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'pause_model_deployment_monitoring_job' not in self._stubs: + self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob', + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['pause_model_deployment_monitoring_job'] + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. 
+ + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'resume_model_deployment_monitoring_job' not in self._stubs: + self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob', + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['resume_model_deployment_monitoring_job'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def wait_operation(
+ self,
+ ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+ r"""Return a callable for the wait_operation method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "wait_operation" not in self._stubs:
+ self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/WaitOperation",
+ request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["wait_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs:
+ self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/ListLocations",
+ request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+ response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+ )
+ return self._stubs["list_locations"]
+
+ @property
+ def get_location(
+ self,
+ ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+ r"""Return a callable for the get location method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_location" not in self._stubs:
+ self._stubs["get_location"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/GetLocation",
+ request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+ response_deserializer=locations_pb2.Location.FromString,
+ )
+ return self._stubs["get_location"]
+
+ @property
+ def set_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+ r"""Return a callable for the set iam policy method over gRPC.
+ Sets the IAM access control policy on the specified
+ function. Replaces any existing policy.
+ Returns:
+ Callable[[~.SetIamPolicyRequest],
+ ~.Policy]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'JobServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..4a382b31ae --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -0,0 +1,1261 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import JobServiceGrpcTransport + + +class JobServiceGrpcAsyncIOTransport(JobServiceTransport): + """gRPC AsyncIO backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + Awaitable[gca_custom_job.CustomJob]]: + r"""Return a callable for the create custom job method over gRPC. + + Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateCustomJobRequest], + Awaitable[~.CustomJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_custom_job' not in self._stubs: + self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', + request_serializer=job_service.CreateCustomJobRequest.serialize, + response_deserializer=gca_custom_job.CustomJob.deserialize, + ) + return self._stubs['create_custom_job'] + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Awaitable[custom_job.CustomJob]]: + r"""Return a callable for the get custom job method over gRPC. + + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + Awaitable[~.CustomJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Awaitable[job_service.ListCustomJobsResponse]]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + Awaitable[~.ListCustomJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Awaitable[gca_data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Awaitable[data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Awaitable[job_service.ListDataLabelingJobsResponse]]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + Awaitable[~.ListDataLabelingJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. 
+ + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + Awaitable[~.ListHyperparameterTuningJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelHyperparameterTuningJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_hyperparameter_tuning_job' not in self._stubs: + self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_hyperparameter_tuning_job'] + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: + r"""Return a callable for the create batch prediction job method over gRPC. + + Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + Returns: + Callable[[~.CreateBatchPredictionJobRequest], + Awaitable[~.BatchPredictionJob]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Awaitable[batch_prediction_job.BatchPredictionJob]]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + Awaitable[~.BatchPredictionJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Awaitable[job_service.ListBatchPredictionJobsResponse]]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. 
+ + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + Awaitable[~.ListBatchPredictionJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_batch_prediction_job' not in self._stubs:
+            self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob',
+                request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_batch_prediction_job']
+
+    @property
+    def cancel_batch_prediction_job(self) -> Callable[
+            [job_service.CancelBatchPredictionJobRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+        Cancels a BatchPredictionJob.
+
+        Starts asynchronous cancellation on the BatchPredictionJob. The
+        server makes the best effort to cancel the job, but success is
+        not guaranteed. Clients can use
+        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+        [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+        is set to ``CANCELLED``. Any files already outputted by the job
+        are not deleted.
+
+        Returns:
+            Callable[[~.CancelBatchPredictionJobRequest],
+                Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. 
+ + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'pause_model_deployment_monitoring_job' not in self._stubs: + self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob', + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['pause_model_deployment_monitoring_job'] + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. + + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'resume_model_deployment_monitoring_job' not in self._stubs: + self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob', + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['resume_model_deployment_monitoring_job'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'JobServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py new file mode 100644 index 0000000000..97cb7deb5d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import MetadataServiceClient +from .async_client import MetadataServiceAsyncClient + +__all__ = ( + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py new file mode 100644 index 0000000000..65c3cc0459 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -0,0 +1,4287 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from 
google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport +from .client import MetadataServiceClient + + +class MetadataServiceAsyncClient: + """Service for reading and writing metadata entries.""" + + _client: MetadataServiceClient + + DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(MetadataServiceClient.artifact_path) + parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) + context_path = staticmethod(MetadataServiceClient.context_path) + parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) + execution_path = staticmethod(MetadataServiceClient.execution_path) + parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) + metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) + parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) + metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) + parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) + common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(MetadataServiceClient.parse_common_folder_path) + common_organization_path = 
staticmethod(MetadataServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MetadataServiceClient.common_project_path) + parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) + common_location_path = staticmethod(MetadataServiceClient.common_location_path) + parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return MetadataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MetadataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_metadata_store(self, + request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Initializes a MetadataStore, including allocation of + resources. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + parent (:class:`str`): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (:class:`google.cloud.aiplatform_v1.types.MetadataStore`): + Required. The MetadataStore to + create. 
+ + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (:class:`str`): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_metadata_store(self, + request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]): + The request object. 
Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_metadata_stores(self, + request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresAsyncPager: + r"""Lists MetadataStores for a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + parent (:class:`str`): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_stores, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataStoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_metadata_store(self, + request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metadata_store, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_artifact(self, + request: Union[metadata_service.CreateArtifactRequest, dict] = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (:class:`str`): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_artifact(self, + request: Union[metadata_service.GetArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_artifacts(self, + request: Union[metadata_service.ListArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsAsyncPager: + r"""Lists Artifacts in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + parent (:class:`str`): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_artifacts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListArtifactsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_artifact(self, + request: Union[metadata_service.UpdateArtifactRequest, dict] = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_artifact(self, + request: Union[metadata_service.DeleteArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]): + The request object. Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def purge_artifacts(self, + request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Artifacts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + parent (:class:`str`): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_artifacts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. + return response + + async def create_context(self, + request: Union[metadata_service.CreateContextRequest, dict] = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (:class:`google.cloud.aiplatform_v1.types.Context`): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (:class:`str`): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_context(self, + request: Union[metadata_service.GetContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + name (:class:`str`): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_contexts(self, + request: Union[metadata_service.ListContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsAsyncPager: + r"""Lists Contexts on the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + parent (:class:`str`): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListContextsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_context(self, + request: Union[metadata_service.UpdateContextRequest, dict] = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + context (:class:`google.cloud.aiplatform_v1.types.Context`): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context.name", request.context.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_context(self, + request: Union[metadata_service.DeleteContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a stored Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + name (:class:`str`): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_context, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def purge_contexts(self, + request: Union[metadata_service.PurgeContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Contexts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_purge_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + parent (:class:`str`): + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. + return response + + async def add_context_artifacts_and_executions(self, + request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + context (:class:`str`): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (:class:`Sequence[str]`): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (:class:`Sequence[str]`): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts: + request.artifacts.extend(artifacts) + if executions: + request.executions.extend(executions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_artifacts_and_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def add_context_children(self, + request: Union[metadata_service.AddContextChildrenRequest, dict] = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_add_context_children(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + context (:class:`str`): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (:class:`Sequence[str]`): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts: + request.child_contexts.extend(child_contexts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_children, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def query_context_lineage_subgraph(self, + request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, + *, + context: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + context (:class:`str`): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_context_lineage_subgraph, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_execution(self, + request: Union[metadata_service.CreateExecutionRequest, dict] = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (:class:`google.cloud.aiplatform_v1.types.Execution`): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (:class:`str`): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_execution(self, + request: Union[metadata_service.GetExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + name (:class:`str`): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_executions(self, + request: Union[metadata_service.ListExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsAsyncPager: + r"""Lists Executions in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]): + The request object. 
Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + parent (:class:`str`): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExecutionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_execution(self, + request: Union[metadata_service.UpdateExecutionRequest, dict] = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + execution (:class:`google.cloud.aiplatform_v1.types.Execution`): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_execution(self, + request: Union[metadata_service.DeleteExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + name (:class:`str`): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def purge_executions(self, + request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Executions. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_purge_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + parent (:class:`str`): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. 
        return response

    async def add_execution_events(self,
            request: Union[metadata_service.AddExecutionEventsRequest, dict] = None,
            *,
            execution: str = None,
            events: Sequence[event.Event] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> metadata_service.AddExecutionEventsResponse:
        r"""Adds Events to the specified Execution.

        An Event indicates whether an Artifact was used as an input or
        output for an Execution. If an Event already exists between the
        Execution and the Artifact, the Event is skipped.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]):
                The request object. If ``request`` is provided, none of
                the flattened fields below may be set.
            execution (:class:`str`):
                Required. The resource name of the Execution that the
                Events connect Artifacts with. Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
            events (:class:`Sequence[google.cloud.aiplatform_v1.types.Event]`):
                The Events to create and add.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.AddExecutionEventsResponse:
                Response message for
                [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents].

        Raises:
            ValueError: If ``request`` is set together with any flattened field.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([execution, events])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.AddExecutionEventsRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if execution is not None:
            request.execution = execution
        # `events` is a repeated field, so extend rather than assign.
        if events:
            request.events.extend(events)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.add_execution_events,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing header keyed on the target execution).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("execution", request.execution),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def query_execution_inputs_and_outputs(self,
            request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None,
            *,
            execution: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> lineage_subgraph.LineageSubgraph:
        r"""Obtains the set of input and output Artifacts for this
        Execution, in the form of a LineageSubgraph that also contains
        the Execution and connecting Events.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]):
                The request object. If ``request`` is provided,
                ``execution`` may not be set.
            execution (:class:`str`):
                Required. The resource name of the Execution whose input
                and output Artifacts should be retrieved as a
                LineageSubgraph. Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}``
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.LineageSubgraph:
                A subgraph of the overall lineage graph. Event edges
                connect Artifact and Execution nodes.

        Raises:
            ValueError: If ``request`` is set together with ``execution``.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([execution])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.QueryExecutionInputsAndOutputsRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if execution is not None:
            request.execution = execution

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.query_execution_inputs_and_outputs,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("execution", request.execution),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def create_metadata_schema(self,
            request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None,
            *,
            parent: str = None,
            metadata_schema: gca_metadata_schema.MetadataSchema = None,
            metadata_schema_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gca_metadata_schema.MetadataSchema:
        r"""Creates a MetadataSchema.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]):
                The request object. If ``request`` is provided, none of
                the flattened fields below may be set.
            parent (:class:`str`):
                Required. The resource name of the MetadataStore where
                the MetadataSchema should be created. Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
            metadata_schema (:class:`google.cloud.aiplatform_v1.types.MetadataSchema`):
                Required. The MetadataSchema to create.
            metadata_schema_id (:class:`str`):
                The {metadata_schema} portion of the resource name with
                the format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}``
                If not provided, the MetadataStore's ID will be a UUID
                generated by the service. Must be 4-128 characters in
                length. Valid characters are ``/[a-z][0-9]-/``. Must be
                unique across all MetadataSchemas in the parent Location.
                (Otherwise the request will fail with ALREADY_EXISTS, or
                PERMISSION_DENIED if the caller can't view the
                preexisting MetadataSchema.)
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.MetadataSchema:
                Instance of a general MetadataSchema.

        Raises:
            ValueError: If ``request`` is set together with any flattened field.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, metadata_schema, metadata_schema_id])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.CreateMetadataSchemaRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if metadata_schema is not None:
            request.metadata_schema = metadata_schema
        if metadata_schema_id is not None:
            request.metadata_schema_id = metadata_schema_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_metadata_schema,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_metadata_schema(self,
            request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> metadata_schema.MetadataSchema:
        r"""Retrieves a specific MetadataSchema.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]):
                The request object. If ``request`` is provided, ``name``
                may not be set.
            name (:class:`str`):
                Required. The resource name of the MetadataSchema to
                retrieve. Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}``
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.MetadataSchema:
                Instance of a general MetadataSchema.

        Raises:
            ValueError: If ``request`` is set together with ``name``.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.GetMetadataSchemaRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_metadata_schema,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_metadata_schemas(self,
            request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListMetadataSchemasAsyncPager:
        r"""Lists MetadataSchemas.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]):
                The request object. If ``request`` is provided,
                ``parent`` may not be set.
            parent (:class:`str`):
                Required. The MetadataStore whose MetadataSchemas should
                be listed. Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager:
                Response message for
                [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas].
                Iterating over this object will yield results and
                resolve additional pages automatically.

        Raises:
            ValueError: If ``request`` is set together with ``parent``.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.ListMetadataSchemasRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_metadata_schemas,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListMetadataSchemasAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def query_artifact_lineage_subgraph(self,
            request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None,
            *,
            artifact: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> lineage_subgraph.LineageSubgraph:
        r"""Retrieves lineage of an Artifact represented through
        Artifacts and Executions connected by Event edges and returned
        as a LineageSubgraph.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]):
                The request object. If ``request`` is provided,
                ``artifact`` may not be set.
            artifact (:class:`str`):
                Required. The resource name of the Artifact whose
                Lineage needs to be retrieved as a LineageSubgraph.
                Format:
                ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}``
                The request may error with FAILED_PRECONDITION if the
                number of Artifacts, the number of Executions, or the
                number of Events that would be returned for the Context
                exceeds 1000.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.LineageSubgraph:
                A subgraph of the overall lineage graph. Event edges
                connect Artifact and Execution nodes.

        Raises:
            ValueError: If ``request`` is set together with ``artifact``.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([artifact])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = metadata_service.QueryArtifactLineageSubgraphRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if artifact is not None:
            request.artifact = artifact

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.query_artifact_lineage_subgraph,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("artifact", request.artifact),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_operations(
        self,
        request: operations_pb2.ListOperationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.

        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for `ListOperations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE(review): the LRO/IAM/Locations mixins wrap with the sync
        # gapic_v1.method.wrap_method (not method_async), matching the
        # generator's output for these methods — confirm this is intended.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.list_operations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def get_operation(
        self,
        request: operations_pb2.GetOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.

        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for `GetOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.get_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def delete_operation(
        self,
        request: operations_pb2.DeleteOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes a long-running operation.

        This method indicates that the client is no longer interested
        in the operation result. It does not cancel the operation.
        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.DeleteOperationRequest`):
                The request object. Request message for `DeleteOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.delete_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.  No response body is expected for delete.
        await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    async def cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it
        returns `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for `CancelOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.cancel_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.  No response body is expected for cancel.
        await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    async def wait_operation(
        self,
        request: operations_pb2.WaitOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Waits until the specified long-running operation is done or reaches
        at most a specified timeout, returning the latest state.

        If the operation is already done, the latest state is immediately
        returned. If the timeout specified is greater than the default
        HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does
        not support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.WaitOperationRequest`):
                The request object. Request message for `WaitOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.WaitOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.wait_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def set_iam_policy(
        self,
        request: iam_policy_pb2.SetIamPolicyRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> policy_pb2.Policy:
        r"""Sets the IAM access control policy on the specified function.

        Replaces any existing policy.

        Args:
            request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
                The request object. Request message for `SetIamPolicy` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy.
                A ``Policy`` is a collection of ``bindings``; a
                ``binding`` binds one or more ``members`` (user
                accounts, service accounts, Google groups, domains) to a
                single ``role``, optionally constrained by a
                ``condition`` expression. For a description of IAM and
                its features, see the IAM developer's guide.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.SetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.set_iam_policy,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (IAM methods route on `resource`, not `name`).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def get_iam_policy(
        self,
        request: iam_policy_pb2.GetIamPolicyRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> policy_pb2.Policy:
        r"""Gets the IAM access control policy for a function.

        Returns an empty policy if the function exists and does not have a
        policy set.

        Args:
            request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
                The request object. Request message for `GetIamPolicy` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy.
                A ``Policy`` is a collection of ``bindings``; a
                ``binding`` binds one or more ``members`` (user
                accounts, service accounts, Google groups, domains) to a
                single ``role``, optionally constrained by a
                ``condition`` expression. For a description of IAM and
                its features, see the IAM developer's guide.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.GetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.get_iam_policy,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def test_iam_permissions(
        self,
        request: iam_policy_pb2.TestIamPermissionsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> iam_policy_pb2.TestIamPermissionsResponse:
        r"""Tests the specified IAM permissions against the IAM access control
        policy for a function.

        If the function does not exist, this will return an empty set
        of permissions, not a NOT_FOUND error.

        Args:
            request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
                The request object. Request message for `TestIamPermissions` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.iam_policy_pb2.TestIamPermissionsResponse:
                Response message for ``TestIamPermissions`` method.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.TestIamPermissionsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.test_iam_permissions,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def get_location(
        self,
        request: locations_pb2.GetLocationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.Location:
        r"""Gets information about a location.

        Args:
            request (:class:`~.location_pb2.GetLocationRequest`):
                The request object. Request message for `GetLocation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.location_pb2.Location:
                Location object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.GetLocationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.get_location,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def list_locations(
        self,
        request: locations_pb2.ListLocationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.ListLocationsResponse:
        r"""Lists information about the supported locations for this service.
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MetadataServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py new file mode 100644 index 0000000000..831b74747a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -0,0 +1,4517 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from 
google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetadataServiceGrpcTransport +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +class MetadataServiceClientMeta(type): + """Metaclass for the MetadataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] + _transport_registry["grpc"] = MetadataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MetadataServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class MetadataServiceClient(metaclass=MetadataServiceClientMeta): + """Service for reading and writing metadata entries.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file.
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str,str]: + """Parses an artifact path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str,str]: + """Parses a context path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: +
"""Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str,str]: + """Parses a execution path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str: + """Returns a fully-qualified metadata_schema string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) + + @staticmethod + def parse_metadata_schema_path(path: str) -> Dict[str,str]: + """Parses a metadata_schema path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def metadata_store_path(project: str,location: str,metadata_store: str,) -> str: + """Returns a fully-qualified metadata_store string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) + + @staticmethod + def parse_metadata_store_path(path: str) -> Dict[str,str]: + """Parses a metadata_store path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} 
+ + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MetadataServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MetadataServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetadataServiceTransport): + # transport is a MetadataServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_metadata_store(self, + request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Initializes a MetadataStore, including allocation of + resources. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (str): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataStoreRequest): + request = metadata_service.CreateMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_metadata_store(self, + request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataStoreRequest): + request = metadata_service.GetMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_metadata_stores(self, + request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresPager: + r"""Lists MetadataStores for a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataStoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataStoresRequest): + request = metadata_service.ListMetadataStoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataStoresPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_metadata_store(self, + request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): + request = metadata_service.DeleteMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_artifact(self, + request: Union[metadata_service.CreateArtifactRequest, dict] = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + parent (str): + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (str): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateArtifactRequest): + request = metadata_service.CreateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_artifact(self, + request: Union[metadata_service.GetArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetArtifactRequest): + request = metadata_service.GetArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_artifacts(self, + request: Union[metadata_service.ListArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsPager: + r"""Lists Artifacts in the MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListArtifactsRequest): + request = metadata_service.ListArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListArtifactsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_artifact(self, + request: Union[metadata_service.UpdateArtifactRequest, dict] = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateArtifactRequest): + request = metadata_service.UpdateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_artifact(self, + request: Union[metadata_service.DeleteArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]): + The request object. Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + name (str): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteArtifactRequest): + request = metadata_service.DeleteArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_artifacts(self, + request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Artifacts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + parent (str): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeArtifactsRequest): + request = metadata_service.PurgeArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. + return response + + def create_context(self, + request: Union[metadata_service.CreateContextRequest, dict] = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + parent (str): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (str): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateContextRequest): + request = metadata_service.CreateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_context(self, + request: Union[metadata_service.GetContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + name (str): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetContextRequest): + request = metadata_service.GetContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_contexts(self, + request: Union[metadata_service.ListContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsPager: + r"""Lists Contexts on the MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + parent (str): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListContextsRequest): + request = metadata_service.ListContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListContextsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_context(self, + request: Union[metadata_service.UpdateContextRequest, dict] = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateContextRequest): + request = metadata_service.UpdateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context.name", request.context.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_context(self, + request: Union[metadata_service.DeleteContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a stored Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + name (str): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteContextRequest): + request = metadata_service.DeleteContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_contexts(self, + request: Union[metadata_service.PurgeContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Contexts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_purge_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + parent (str): + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeContextsRequest): + request = metadata_service.PurgeContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. + return response + + def add_context_artifacts_and_executions(self, + request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. 
+ If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (Sequence[str]): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextArtifactsAndExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts is not None: + request.artifacts = artifacts + if executions is not None: + request.executions = executions + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_context_children(self, + request: Union[metadata_service.AddContextChildrenRequest, dict] = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_add_context_children(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (Sequence[str]): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextChildrenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextChildrenRequest): + request = metadata_service.AddContextChildrenRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts is not None: + request.child_contexts = child_contexts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_children] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def query_context_lineage_subgraph(self, + request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, + *, + context: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + context (str): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryContextLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): + request = metadata_service.QueryContextLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def create_execution(self, + request: Union[metadata_service.CreateExecutionRequest, dict] = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + parent (str): + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateExecutionRequest): + request = metadata_service.CreateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_execution] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_execution(self, + request: Union[metadata_service.GetExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + name (str): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetExecutionRequest): + request = metadata_service.GetExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_executions(self, + request: Union[metadata_service.ListExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsPager: + r"""Lists Executions in the MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + parent (str): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListExecutionsRequest): + request = metadata_service.ListExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExecutionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_execution(self, + request: Union[metadata_service.UpdateExecutionRequest, dict] = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateExecutionRequest): + request = metadata_service.UpdateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_execution(self, + request: Union[metadata_service.DeleteExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Execution. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteExecutionRequest): + request = metadata_service.DeleteExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_executions(self, + request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Executions. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_purge_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeExecutionsRequest): + request = metadata_service.PurgeExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. + return response + + def add_execution_events(self, + request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. 
If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_add_execution_events(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + execution (str): + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddExecutionEventsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddExecutionEventsRequest): + request = metadata_service.AddExecutionEventsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events is not None: + request.events = events + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_execution_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_execution_inputs_and_outputs(self, + request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, + *, + execution: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (str): + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryExecutionInputsAndOutputsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest): + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_metadata_schema(self, + request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates a MetadataSchema. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. + parent (str): + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) 
+ + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): + request = metadata_service.CreateMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_metadata_schema(self, + request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.MetadataSchema: + Instance of a general MetadataSchema. 
+ """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataSchemaRequest): + request = metadata_service.GetMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_metadata_schemas(self, + request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasPager: + r"""Lists MetadataSchemas. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + parent (str): + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataSchemasRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataSchemasRequest): + request = metadata_service.ListMetadataSchemasRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataSchemasPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_artifact_lineage_subgraph(self, + request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, + *, + artifact: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (str): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryArtifactLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest): + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact", request.artifact), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MetadataServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py new file mode 100644 index 0000000000..00ad6d4296 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store + + +class ListMetadataStoresPager: + """A pager for iterating through ``list_metadata_stores`` requests. 
class ListMetadataStoresPager:
    """Pager over the results of ``list_metadata_stores`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` and
    exposes ``__iter__`` over its ``metadata_stores`` field.  Whenever the
    current response carries a ``next_page_token``, a further
    ``ListMetadataStores`` request is issued transparently.

    Attribute access falls through to the most recently fetched response,
    so all the usual response attributes remain available on the pager.
    """

    def __init__(self,
            method: Callable[..., metadata_service.ListMetadataStoresResponse],
            request: metadata_service.ListMetadataStoresRequest,
            response: metadata_service.ListMetadataStoresResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method originally called; re-invoked
                to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListMetadataStoresRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[metadata_store.MetadataStore]:
        # Flatten all pages into a single stream of MetadataStore items.
        return (item for page in self.pages for item in page.metadata_stores)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListMetadataStoresAsyncPager:
    """Asynchronous pager over the results of ``list_metadata_stores`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` and
    exposes ``__aiter__`` over its ``metadata_stores`` field, awaiting
    further ``ListMetadataStores`` requests whenever a ``next_page_token``
    is present.  Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]],
            request: metadata_service.ListMetadataStoresRequest,
            response: metadata_service.ListMetadataStoresResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The API coroutine originally called; awaited
                again to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListMetadataStoresRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]:
        async def _flatten():
            async for page in self.pages:
                for item in page.metadata_stores:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListArtifactsPager:
    """Pager over the results of ``list_artifacts`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` and
    exposes ``__iter__`` over its ``artifacts`` field, issuing further
    ``ListArtifacts`` requests while a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., metadata_service.ListArtifactsResponse],
            request: metadata_service.ListArtifactsRequest,
            response: metadata_service.ListArtifactsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method originally called; re-invoked
                to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListArtifactsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListArtifactsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListArtifactsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[artifact.Artifact]:
        # Flatten all pages into a single stream of Artifact items.
        return (item for page in self.pages for item in page.artifacts)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListArtifactsAsyncPager:
    """Asynchronous pager over the results of ``list_artifacts`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` and
    exposes ``__aiter__`` over its ``artifacts`` field, awaiting further
    ``ListArtifacts`` requests whenever a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]],
            request: metadata_service.ListArtifactsRequest,
            response: metadata_service.ListArtifactsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The API coroutine originally called; awaited
                again to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListArtifactsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListArtifactsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListArtifactsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[artifact.Artifact]:
        async def _flatten():
            async for page in self.pages:
                for item in page.artifacts:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListContextsPager:
    """Pager over the results of ``list_contexts`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` and
    exposes ``__iter__`` over its ``contexts`` field, issuing further
    ``ListContexts`` requests while a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., metadata_service.ListContextsResponse],
            request: metadata_service.ListContextsRequest,
            response: metadata_service.ListContextsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method originally called; re-invoked
                to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListContextsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListContextsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListContextsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[metadata_service.ListContextsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[context.Context]:
        # Flatten all pages into a single stream of Context items.
        return (item for page in self.pages for item in page.contexts)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListContextsAsyncPager:
    """Asynchronous pager over the results of ``list_contexts`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` and
    exposes ``__aiter__`` over its ``contexts`` field, awaiting further
    ``ListContexts`` requests whenever a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[metadata_service.ListContextsResponse]],
            request: metadata_service.ListContextsRequest,
            response: metadata_service.ListContextsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The API coroutine originally called; awaited
                again to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListContextsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListContextsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListContextsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[context.Context]:
        async def _flatten():
            async for page in self.pages:
                for item in page.contexts:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListExecutionsPager:
    """Pager over the results of ``list_executions`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` and
    exposes ``__iter__`` over its ``executions`` field, issuing further
    ``ListExecutions`` requests while a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., metadata_service.ListExecutionsResponse],
            request: metadata_service.ListExecutionsRequest,
            response: metadata_service.ListExecutionsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method originally called; re-invoked
                to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListExecutionsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListExecutionsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListExecutionsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[execution.Execution]:
        # Flatten all pages into a single stream of Execution items.
        return (item for page in self.pages for item in page.executions)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListExecutionsAsyncPager:
    """Asynchronous pager over the results of ``list_executions`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` and
    exposes ``__aiter__`` over its ``executions`` field, awaiting further
    ``ListExecutions`` requests whenever a ``next_page_token`` is present.
    Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]],
            request: metadata_service.ListExecutionsRequest,
            response: metadata_service.ListExecutionsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The API coroutine originally called; awaited
                again to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListExecutionsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListExecutionsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListExecutionsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[execution.Execution]:
        async def _flatten():
            async for page in self.pages:
                for item in page.executions:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListMetadataSchemasPager:
    """Pager over the results of ``list_metadata_schemas`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` and
    exposes ``__iter__`` over its ``metadata_schemas`` field, issuing
    further ``ListMetadataSchemas`` requests while a ``next_page_token`` is
    present.  Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., metadata_service.ListMetadataSchemasResponse],
            request: metadata_service.ListMetadataSchemasRequest,
            response: metadata_service.ListMetadataSchemasResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The API method originally called; re-invoked
                to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListMetadataSchemasRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]:
        # Flatten all pages into a single stream of MetadataSchema items.
        return (item for page in self.pages for item in page.metadata_schemas)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListMetadataSchemasAsyncPager:
    """Asynchronous pager over the results of ``list_metadata_schemas`` calls.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` and
    exposes ``__aiter__`` over its ``metadata_schemas`` field, awaiting
    further ``ListMetadataSchemas`` requests whenever a ``next_page_token``
    is present.  Attribute access falls through to the latest response.
    """

    def __init__(self,
            method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]],
            request: metadata_service.ListMetadataSchemasRequest,
            response: metadata_service.ListMetadataSchemasResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The API coroutine originally called; awaited
                again to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = metadata_service.ListMetadataSchemasRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]:
        async def _flatten():
            async for page in self.pages:
                for item in page.metadata_schemas:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Transport registry for the MetadataService: maps transport names to classes."""
from collections import OrderedDict
from typing import Dict, Type

from .base import MetadataServiceTransport
from .grpc import MetadataServiceGrpcTransport
from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport


# Registry of available transports, keyed by their canonical names.
# Insertion order is preserved: 'grpc' first, then 'grpc_asyncio'.
_transport_registry = OrderedDict(
    (
        ('grpc', MetadataServiceGrpcTransport),
        ('grpc_asyncio', MetadataServiceGrpcAsyncIOTransport),
    ),
)  # type: Dict[str, Type[MetadataServiceTransport]]

__all__ = (
    'MetadataServiceTransport',
    'MetadataServiceGrpcTransport',
    'MetadataServiceGrpcAsyncIOTransport',
)
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
# NOTE(review): pkg_resources is deprecated upstream; kept here because the
# generated surface (DistributionNotFound handling) depends on it.
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import artifact
from google.cloud.aiplatform_v1.types import artifact as gca_artifact
from google.cloud.aiplatform_v1.types import context
from google.cloud.aiplatform_v1.types import context as gca_context
from google.cloud.aiplatform_v1.types import execution
from google.cloud.aiplatform_v1.types import execution as gca_execution
from google.cloud.aiplatform_v1.types import lineage_subgraph
from google.cloud.aiplatform_v1.types import metadata_schema
from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema
from google.cloud.aiplatform_v1.types import metadata_service
from google.cloud.aiplatform_v1.types import metadata_store
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# Fix: operations_pb2 was imported twice (with and without the type-ignore
# pragma); keep a single import.
from google.longrunning import operations_pb2  # type: ignore

# Default client info advertises the installed google-cloud-aiplatform
# version in the user-agent; falls back to an empty ClientInfo when the
# distribution metadata is unavailable (e.g. in-tree/editable runs).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_metadata_store: gapic_v1.method.wrap_method( + self.create_metadata_store, + default_timeout=None, + client_info=client_info, + ), + self.get_metadata_store: gapic_v1.method.wrap_method( + self.get_metadata_store, + default_timeout=None, + client_info=client_info, + ), + self.list_metadata_stores: gapic_v1.method.wrap_method( + self.list_metadata_stores, + default_timeout=None, + client_info=client_info, + ), + self.delete_metadata_store: gapic_v1.method.wrap_method( + self.delete_metadata_store, + default_timeout=None, + client_info=client_info, + ), + self.create_artifact: gapic_v1.method.wrap_method( + self.create_artifact, + default_timeout=None, + client_info=client_info, + ), + self.get_artifact: gapic_v1.method.wrap_method( + self.get_artifact, + default_timeout=None, + client_info=client_info, + ), + self.list_artifacts: gapic_v1.method.wrap_method( + self.list_artifacts, + default_timeout=None, + client_info=client_info, + ), + self.update_artifact: gapic_v1.method.wrap_method( + self.update_artifact, + default_timeout=None, 
+ client_info=client_info, + ), + self.delete_artifact: gapic_v1.method.wrap_method( + self.delete_artifact, + default_timeout=None, + client_info=client_info, + ), + self.purge_artifacts: gapic_v1.method.wrap_method( + self.purge_artifacts, + default_timeout=None, + client_info=client_info, + ), + self.create_context: gapic_v1.method.wrap_method( + self.create_context, + default_timeout=None, + client_info=client_info, + ), + self.get_context: gapic_v1.method.wrap_method( + self.get_context, + default_timeout=None, + client_info=client_info, + ), + self.list_contexts: gapic_v1.method.wrap_method( + self.list_contexts, + default_timeout=None, + client_info=client_info, + ), + self.update_context: gapic_v1.method.wrap_method( + self.update_context, + default_timeout=None, + client_info=client_info, + ), + self.delete_context: gapic_v1.method.wrap_method( + self.delete_context, + default_timeout=None, + client_info=client_info, + ), + self.purge_contexts: gapic_v1.method.wrap_method( + self.purge_contexts, + default_timeout=None, + client_info=client_info, + ), + self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( + self.add_context_artifacts_and_executions, + default_timeout=None, + client_info=client_info, + ), + self.add_context_children: gapic_v1.method.wrap_method( + self.add_context_children, + default_timeout=None, + client_info=client_info, + ), + self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_context_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + self.create_execution: gapic_v1.method.wrap_method( + self.create_execution, + default_timeout=None, + client_info=client_info, + ), + self.get_execution: gapic_v1.method.wrap_method( + self.get_execution, + default_timeout=None, + client_info=client_info, + ), + self.list_executions: gapic_v1.method.wrap_method( + self.list_executions, + default_timeout=None, + client_info=client_info, + ), + self.update_execution: 
gapic_v1.method.wrap_method( + self.update_execution, + default_timeout=None, + client_info=client_info, + ), + self.delete_execution: gapic_v1.method.wrap_method( + self.delete_execution, + default_timeout=None, + client_info=client_info, + ), + self.purge_executions: gapic_v1.method.wrap_method( + self.purge_executions, + default_timeout=None, + client_info=client_info, + ), + self.add_execution_events: gapic_v1.method.wrap_method( + self.add_execution_events, + default_timeout=None, + client_info=client_info, + ), + self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( + self.query_execution_inputs_and_outputs, + default_timeout=None, + client_info=client_info, + ), + self.create_metadata_schema: gapic_v1.method.wrap_method( + self.create_metadata_schema, + default_timeout=None, + client_info=client_info, + ), + self.get_metadata_schema: gapic_v1.method.wrap_method( + self.get_metadata_schema, + default_timeout=None, + client_info=client_info, + ), + self.list_metadata_schemas: gapic_v1.method.wrap_method( + self.list_metadata_schemas, + default_timeout=None, + client_info=client_info, + ), + self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Union[ + metadata_store.MetadataStore, + Awaitable[metadata_store.MetadataStore] + ]]: + raise NotImplementedError() + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Union[ + metadata_service.ListMetadataStoresResponse, + Awaitable[metadata_service.ListMetadataStoresResponse] + ]]: + raise NotImplementedError() + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Union[ + artifact.Artifact, + Awaitable[artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Union[ + metadata_service.ListArtifactsResponse, + Awaitable[metadata_service.ListArtifactsResponse] + ]]: + raise NotImplementedError() + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def delete_artifact(self) -> Callable[ + 
[metadata_service.DeleteArtifactRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise NotImplementedError() + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Union[ + context.Context, + Awaitable[context.Context] + ]]: + raise NotImplementedError() + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Union[ + metadata_service.ListContextsResponse, + Awaitable[metadata_service.ListContextsResponse] + ]]: + raise NotImplementedError() + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise NotImplementedError() + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_contexts(self) -> Callable[ + [metadata_service.PurgeContextsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Union[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def add_context_children(self) -> Callable[ + 
[metadata_service.AddContextChildrenRequest], + Union[ + metadata_service.AddContextChildrenResponse, + Awaitable[metadata_service.AddContextChildrenResponse] + ]]: + raise NotImplementedError() + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Union[ + execution.Execution, + Awaitable[execution.Execution] + ]]: + raise NotImplementedError() + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Union[ + metadata_service.ListExecutionsResponse, + Awaitable[metadata_service.ListExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def delete_execution(self) -> Callable[ + [metadata_service.DeleteExecutionRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_executions(self) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Union[ + metadata_service.AddExecutionEventsResponse, + Awaitable[metadata_service.AddExecutionEventsResponse] + ]]: + raise NotImplementedError() + + @property 
+ def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Union[ + gca_metadata_schema.MetadataSchema, + Awaitable[gca_metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Union[ + metadata_schema.MetadataSchema, + Awaitable[metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Union[ + metadata_service.ListMetadataSchemasResponse, + Awaitable[metadata_service.ListMetadataSchemasResponse] + ]]: + raise NotImplementedError() + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def 
wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'MetadataServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py new file mode 100644 index 0000000000..b6f9db2538 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py @@ -0,0 +1,1301 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO + + +class MetadataServiceGrpcTransport(MetadataServiceTransport): + """gRPC backend transport for 
MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + metadata_store.MetadataStore]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + ~.MetadataStore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + metadata_service.ListMetadataStoresResponse]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + ~.ListMetadataStoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + artifact.Artifact]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + metadata_service.ListArtifactsResponse]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + ~.ListArtifactsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def delete_artifact(self) -> Callable[ + [metadata_service.DeleteArtifactRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_artifact' not in self._stubs: + self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact', + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_artifact'] + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + operations_pb2.Operation]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. + + Returns: + Callable[[~.PurgeArtifactsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_artifacts' not in self._stubs: + self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts', + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_artifacts'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + gca_context.Context]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + context.Context]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + metadata_service.ListContextsResponse]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + ~.ListContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + gca_context.Context]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_context' not in self._stubs: + self._stubs['delete_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteContext', + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_context'] + + @property + def purge_contexts(self) -> Callable[ + [metadata_service.PurgeContextsRequest], + operations_pb2.Operation]: + r"""Return a callable for the purge contexts method over gRPC. + + Purges Contexts. + + Returns: + Callable[[~.PurgeContextsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_contexts' not in self._stubs: + self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeContexts', + request_serializer=metadata_service.PurgeContextsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_contexts'] + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + metadata_service.AddContextArtifactsAndExecutionsResponse]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + ~.AddContextArtifactsAndExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_context_artifacts_and_executions' not in self._stubs: + self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions', + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs['add_context_artifacts_and_executions'] + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + metadata_service.AddContextChildrenResponse]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + ~.AddContextChildrenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'add_context_children' not in self._stubs: + self._stubs['add_context_children'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddContextChildren', + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs['add_context_children'] + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_context_lineage_subgraph' not in self._stubs: + self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph', + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_context_lineage_subgraph'] + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + gca_execution.Execution]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. + + Returns: + Callable[[~.CreateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_execution' not in self._stubs: + self._stubs['create_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateExecution', + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['create_execution'] + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + execution.Execution]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_execution' not in self._stubs: + self._stubs['get_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetExecution', + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs['get_execution'] + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + metadata_service.ListExecutionsResponse]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. + + Returns: + Callable[[~.ListExecutionsRequest], + ~.ListExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_executions' not in self._stubs: + self._stubs['list_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListExecutions', + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs['list_executions'] + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + gca_execution.Execution]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_execution' not in self._stubs: + self._stubs['update_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateExecution', + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['update_execution'] + + @property + def delete_execution(self) -> Callable[ + [metadata_service.DeleteExecutionRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete execution method over gRPC. + + Deletes an Execution. + + Returns: + Callable[[~.DeleteExecutionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_execution' not in self._stubs: + self._stubs['delete_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteExecution', + request_serializer=metadata_service.DeleteExecutionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_execution'] + + @property + def purge_executions(self) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + operations_pb2.Operation]: + r"""Return a callable for the purge executions method over gRPC. + + Purges Executions. + + Returns: + Callable[[~.PurgeExecutionsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_executions' not in self._stubs: + self._stubs['purge_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions', + request_serializer=metadata_service.PurgeExecutionsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_executions'] + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + metadata_service.AddExecutionEventsResponse]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + ~.AddExecutionEventsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_execution_events' not in self._stubs: + self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents', + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs['add_execution_events'] + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_execution_inputs_and_outputs' not in self._stubs: + self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs', + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_execution_inputs_and_outputs'] + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + gca_metadata_schema.MetadataSchema]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. 
+ + Returns: + Callable[[~.CreateMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_metadata_schema' not in self._stubs: + self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema', + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['create_metadata_schema'] + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + metadata_schema.MetadataSchema]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_schema' not in self._stubs: + self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema', + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['get_metadata_schema'] + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + metadata_service.ListMetadataSchemasResponse]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. 
+ + Returns: + Callable[[~.ListMetadataSchemasRequest], + ~.ListMetadataSchemasResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_schemas' not in self._stubs: + self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas', + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs['list_metadata_schemas'] + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'query_artifact_lineage_subgraph' not in self._stubs: + self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph', + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_artifact_lineage_subgraph'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'MetadataServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..704b7f3cf1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py @@ -0,0 +1,1300 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MetadataServiceGrpcTransport + + +class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): + """gRPC AsyncIO backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Awaitable[metadata_store.MetadataStore]]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + Awaitable[~.MetadataStore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Awaitable[metadata_service.ListMetadataStoresResponse]]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + Awaitable[~.ListMetadataStoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Awaitable[artifact.Artifact]]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Awaitable[metadata_service.ListArtifactsResponse]]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + Awaitable[~.ListArtifactsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def delete_artifact(self) -> Callable[ + [metadata_service.DeleteArtifactRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_artifact' not in self._stubs: + self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact', + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_artifact'] + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. + + Returns: + Callable[[~.PurgeArtifactsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_artifacts' not in self._stubs: + self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts', + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_artifacts'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Awaitable[context.Context]]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Awaitable[metadata_service.ListContextsResponse]]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + Awaitable[~.ListContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_context' not in self._stubs: + self._stubs['delete_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteContext', + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_context'] + + @property + def purge_contexts(self) -> Callable[ + [metadata_service.PurgeContextsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge contexts method over gRPC. + + Purges Contexts. + + Returns: + Callable[[~.PurgeContextsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_contexts' not in self._stubs: + self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeContexts', + request_serializer=metadata_service.PurgeContextsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_contexts'] + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_context_artifacts_and_executions' not in self._stubs: + self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions', + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs['add_context_artifacts_and_executions'] + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Awaitable[metadata_service.AddContextChildrenResponse]]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + Awaitable[~.AddContextChildrenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'add_context_children' not in self._stubs: + self._stubs['add_context_children'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddContextChildren', + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs['add_context_children'] + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_context_lineage_subgraph' not in self._stubs: + self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph', + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_context_lineage_subgraph'] + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. 
+ + Returns: + Callable[[~.CreateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_execution' not in self._stubs: + self._stubs['create_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateExecution', + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['create_execution'] + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Awaitable[execution.Execution]]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_execution' not in self._stubs: + self._stubs['get_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetExecution', + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs['get_execution'] + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Awaitable[metadata_service.ListExecutionsResponse]]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. 
+ + Returns: + Callable[[~.ListExecutionsRequest], + Awaitable[~.ListExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_executions' not in self._stubs: + self._stubs['list_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListExecutions', + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs['list_executions'] + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_execution' not in self._stubs: + self._stubs['update_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/UpdateExecution', + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['update_execution'] + + @property + def delete_execution(self) -> Callable[ + [metadata_service.DeleteExecutionRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete execution method over gRPC. + + Deletes an Execution. 
+ + Returns: + Callable[[~.DeleteExecutionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_execution' not in self._stubs: + self._stubs['delete_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/DeleteExecution', + request_serializer=metadata_service.DeleteExecutionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_execution'] + + @property + def purge_executions(self) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge executions method over gRPC. + + Purges Executions. + + Returns: + Callable[[~.PurgeExecutionsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_executions' not in self._stubs: + self._stubs['purge_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions', + request_serializer=metadata_service.PurgeExecutionsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_executions'] + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Awaitable[metadata_service.AddExecutionEventsResponse]]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. 
An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + Awaitable[~.AddExecutionEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_execution_events' not in self._stubs: + self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents', + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs['add_execution_events'] + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'query_execution_inputs_and_outputs' not in self._stubs: + self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs', + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_execution_inputs_and_outputs'] + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Awaitable[gca_metadata_schema.MetadataSchema]]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_metadata_schema' not in self._stubs: + self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema', + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['create_metadata_schema'] + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Awaitable[metadata_schema.MetadataSchema]]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_schema' not in self._stubs: + self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema', + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['get_metadata_schema'] + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Awaitable[metadata_service.ListMetadataSchemasResponse]]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + Awaitable[~.ListMetadataSchemasResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_schemas' not in self._stubs: + self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas', + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs['list_metadata_schemas'] + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. 
+ + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_artifact_lineage_subgraph' not in self._stubs: + self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph', + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_artifact_lineage_subgraph'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'MetadataServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py new file mode 100644 index 0000000000..bb6f4258df --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import MigrationServiceClient +from .async_client import MigrationServiceAsyncClient + +__all__ = ( + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py new file mode 100644 index 0000000000..cc3d8ee7ab --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -0,0 +1,1109 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.migration_service import pagers +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport +from .client import MigrationServiceClient + + +class MigrationServiceAsyncClient: + """A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
+ """ + + _client: MigrationServiceClient + + DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT + + annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) + parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + version_path = staticmethod(MigrationServiceClient.version_path) + parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) + common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MigrationServiceClient.common_project_path) + parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) + common_location_path = 
staticmethod(MigrationServiceClient.common_location_path) + parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return MigrationServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
+    @property
+    def transport(self) -> MigrationServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            MigrationServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, MigrationServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the migration service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, ~.MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MigrationServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def search_migratable_resources(self, + request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesAsyncPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + parent (:class:`str`): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.SearchMigratableResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_migratable_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchMigratableResourcesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_migrate_resources(self, + request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, + *, + parent: str = None, + migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]): + The request object. Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + parent (:class:`str`): + Required. The location of the migrated resource will + live in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]`): + Required. The request messages + specifying the resources to migrate. + They must be in the same location as the + destination. Up to 50 resources can be + migrated in one batch. + + This corresponds to the ``migrate_resource_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` + Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, migrate_resource_requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.BatchMigrateResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if migrate_resource_requests: + request.migrate_resource_requests.extend(migrate_resource_requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_migrate_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py new file mode 100644 index 0000000000..b921e0bab8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -0,0 +1,1357 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

# Older versions of google-api-core do not expose _MethodDefault; fall back
# to a plain ``object`` sentinel in the Union.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.api_core import operation  # type: ignore
from google.api_core import operation_async  # type: ignore
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2
from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MigrationServiceGrpcTransport
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport


class MigrationServiceClientMeta(type):
    """Metaclass for the MigrationService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[MigrationServiceTransport]]
    _transport_registry["grpc"] = MigrationServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
        ) -> Type[MigrationServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first
        # one in the dictionary).
        return next(iter(cls._transport_registry.values()))


class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
    """A service that migrates resources from automl.googleapis.com,
    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com"
        respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Named groups reconstructed from the m.groups() unpack below; the
        # extraction of this file had stripped the `<name>` parts.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        # Renamed from `mtls` to avoid shadowing the imported mtls module.
        name, mtls_part, sandbox, googledomain = m.groups()
        if mtls_part or not googledomain:
            # Already an mTLS endpoint, or not a *.googleapis.com domain.
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            MigrationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            MigrationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> MigrationServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            MigrationServiceTransport: The transport used by the client
                instance.
        """
        return self._transport

    @staticmethod
    def annotated_dataset_path(project: str, dataset: str, annotated_dataset: str, ) -> str:
        """Returns a fully-qualified annotated_dataset string."""
        return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )

    @staticmethod
    def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
        """Parses an annotated_dataset path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path)
        return m.groupdict() if m else {}

    # NOTE(review): the generator emitted three dataset_path/parse_dataset_path
    # definitions and two identical model_path/parse_model_path definitions;
    # only the last binding of each name was ever visible at runtime, so the
    # shadowed duplicates have been removed. Behavior is unchanged.
    @staticmethod
    def dataset_path(project: str, dataset: str, ) -> str:
        """Returns a fully-qualified dataset string."""
        return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )

    @staticmethod
    def parse_dataset_path(path: str) -> Dict[str, str]:
        """Parses a dataset path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def model_path(project: str, location: str, model: str, ) -> str:
        """Returns a fully-qualified model string."""
        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )

    @staticmethod
    def parse_model_path(path: str) -> Dict[str, str]:
        """Parses a model path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def version_path(project: str, model: str, version: str, ) -> str:
        """Returns a fully-qualified version string."""
        return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )

    @staticmethod
    def parse_version_path(path: str) -> Dict[str, str]:
        """Parses a version path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    @classmethod
    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is
        not "true", the client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the
        provided one; if the default client cert source exists, use the
        default one; otherwise the client cert source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided
        one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is
        "always", use the default mTLS endpoint; if the environment
        variable is "never", use the default API endpoint; otherwise if
        client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, MigrationServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the migration service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, MigrationServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options)

        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError("client_options.api_key and credentials are mutually exclusive")

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, MigrationServiceTransport):
            # transport is a MigrationServiceTransport instance.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore

            if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"):
                credentials = google.auth._default.get_api_key_credentials(api_key_value)

            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def search_migratable_resources(self,
            request: Optional[Union[migration_service.SearchMigratableResourcesRequest, dict]] = None,
            *,
            parent: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.SearchMigratableResourcesPager:
        r"""Searches all of the resources in
        automl.googleapis.com, datalabeling.googleapis.com and
        ml.googleapis.com that can be migrated to Vertex AI's
        given location.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_search_migratable_resources():
                # Create a client
                client = aiplatform_v1.MigrationServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.SearchMigratableResourcesRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.search_migratable_resources(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]):
                The request object. Request message for
                [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
            parent (str):
                Required. The location that the migratable resources
                should be searched from. It's the Vertex AI location
                that the resources can be migrated to, not the
                resources' original location. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager:
                Response message for
                [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a migration_service.SearchMigratableResourcesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, migration_service.SearchMigratableResourcesRequest):
            request = migration_service.SearchMigratableResourcesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchMigratableResourcesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_migrate_resources(self, + request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, + *, + parent: str = None, + migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + .. 
+ code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]): + The request object. Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + parent (str): + Required. The location where the migrated + resources will live. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]): + Required. The request messages + specifying the resources to migrate. + They must be in the same location as the + destination. Up to 50 resources can be + migrated in one batch. + + This corresponds to the ``migrate_resource_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` + Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, migrate_resource_requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a migration_service.BatchMigrateResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, migration_service.BatchMigrateResourcesRequest): + request = migration_service.BatchMigrateResourcesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if migrate_resource_requests is not None: + request.migrate_resource_requests = migrate_resource_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py new file mode 100644 index 0000000000..ff797b2b5d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service + + +class SearchMigratableResourcesPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``migratable_resources`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., migration_service.SearchMigratableResourcesResponse], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: + for page in self.pages: + yield from page.migratable_resources + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchMigratableResourcesAsyncPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: + async def async_generator(): + async for page in self.pages: + for response in page.migratable_resources: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py new file mode 100644 index 0000000000..2176366c5e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MigrationServiceTransport +from .grpc import MigrationServiceGrpcTransport +from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] +_transport_registry['grpc'] = MigrationServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport + +__all__ = ( + 'MigrationServiceTransport', + 'MigrationServiceGrpcTransport', + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py new file mode 100644 index 0000000000..8cb09749c1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class MigrationServiceTransport(abc.ABC): + """Abstract transport class for MigrationService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.search_migratable_resources: gapic_v1.method.wrap_method( + self.search_migratable_resources, + default_timeout=None, + client_info=client_info, + ), + self.batch_migrate_resources: gapic_v1.method.wrap_method( + self.batch_migrate_resources, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Union[ + migration_service.SearchMigratableResourcesResponse, + Awaitable[migration_service.SearchMigratableResourcesResponse] + ]]: + raise NotImplementedError() + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, 
Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'MigrationServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py new file mode 100644 
index 0000000000..8373972d5f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -0,0 +1,522 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO + + +class MigrationServiceGrpcTransport(MigrationServiceTransport): + """gRPC backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + migration_service.SearchMigratableResourcesResponse]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + ~.SearchMigratableResourcesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'MigrationServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..72940827c8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -0,0 +1,521 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MigrationServiceGrpcTransport + + +class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): + """gRPC AsyncIO backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Awaitable[migration_service.SearchMigratableResourcesResponse]]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + Awaitable[~.SearchMigratableResourcesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(review): the cache key below previously read "delete_operation"
+        # (copy-paste error), which made the guard test the wrong stub entry.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py new file mode 100644 index 0000000000..c95c75b700 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import ModelServiceClient +from .async_client import ModelServiceAsyncClient + +__all__ = ( + 'ModelServiceClient', + 'ModelServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py new file mode 100644 index 0000000000..9ef9bf0501 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -0,0 +1,2457 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from 
.transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .client import ModelServiceClient + + +class ModelServiceAsyncClient: + """A service for managing Vertex AI's machine learning Models.""" + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(ModelServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) + parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) + model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) + parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) + training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) + common_location_path 
= staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def upload_model(self, + request: Union[model_service.UploadModelRequest, dict] = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Uploads a Model artifact into Vertex AI. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_upload_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + parent (:class:`str`): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.aiplatform_v1.types.Model`): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UploadModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upload_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: Union[model_service.GetModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + name (:class:`str`): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, + also provide the version ID or version alias. Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. The "default" version alias is + created for the first version of the model, and can be + moved to other versions later on. There will be exactly + one default version. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: Union[model_service.ListModelsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists Models in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_models(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. 
+ parent (:class:`str`): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_versions(self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsAsyncPager: + r"""Lists versions of the specified model. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_model_versions(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions]. + name (:class:`str`): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsAsyncPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelVersionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_versions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelVersionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_model(self, + request: Union[model_service.UpdateModelRequest, dict] = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + model (:class:`google.cloud.aiplatform_v1.types.Model`): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. 
A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UpdateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: Union[model_service.DeleteModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. + name (:class:`str`): + Required. The name of the Model resource to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model_version(self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_model_version(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1.ModelService.DeleteModelVersion]. + name (:class:`str`): + Required. The name of the model version to be deleted, + with a version ID explicitly included. 
+ + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelVersionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_version, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def merge_version_aliases(self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = await client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1.ModelService.MergeVersionAliases]. + name (:class:`str`): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ version_aliases (:class:`Sequence[str]`): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix + to an alias means removing that alias from the version. + ``-`` is NOT counted in the 128 characters. Example: + ``-golden`` means removing the ``golden`` alias from the + version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have + the exactly same order from this MergeVersionAliases + API. 2) Adding and deleting the same alias in the + request is not recommended, and the 2 operations will + be cancelled out. + + This corresponds to the ``version_aliases`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, version_aliases]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.MergeVersionAliasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if version_aliases: + request.version_aliases.extend(version_aliases) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.merge_version_aliases, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: Union[model_service.ExportModelRequest, dict] = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_export_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + name (:class:`str`): + Required. The resource name of the + Model to export. 
The resource name may + contain version id or version alias to + specify the version, if no version is + specified, the default version will be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_model_evaluation(self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + parent (:class:`str`): + Required. The name of the parent model resource. 
Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (:class:`google.cloud.aiplatform_v1.types.ModelEvaluation`): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ImportModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_model_evaluation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: Union[model_service.GetModelEvaluationRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + name (:class:`str`): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: Union[model_service.ListModelEvaluationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists ModelEvaluations in a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + parent (:class:`str`): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation_slice(self, + request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. + name (:class:`str`): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationSliceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation_slice, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluation_slices(self, + request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesAsyncPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + parent (:class:`str`): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationSlicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluation_slices, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationSlicesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py new file mode 100644 index 0000000000..5612c2dc9f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py @@ -0,0 +1,2687 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # 
class ModelServiceClientMeta(type):
    """Metaclass for the ModelService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
    _transport_registry["grpc"] = ModelServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[ModelServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class ModelServiceClient(metaclass=ModelServiceClientMeta):
    """A service for managing Vertex AI's machine learning Models."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # FIX: the named groups were stripped in the corrupted source
        # ("(?P[^.]+)" is invalid regex syntax and raises re.error);
        # restored here as in the standard generated client.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com host: return as-is.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ModelServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ModelServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Alias kept for backwards compatibility with older client surfaces.
    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> ModelServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            ModelServiceTransport: The transport used by the client
                instance.
        """
        return self._transport
+ + Returns: + ModelServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: + """Returns a fully-qualified model_evaluation string.""" + return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + + @staticmethod + def parse_model_evaluation_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: + """Returns a fully-qualified model_evaluation_slice string.""" + return 
"projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + + @staticmethod + def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation_slice path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: + """Returns a fully-qualified training_pipeline string.""" + return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + + @staticmethod + def parse_training_pipeline_path(path: str) -> Dict[str,str]: + """Parses a training_pipeline path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, ModelServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the model service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ModelServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict or None and coerce to a
        # ClientOptions instance before reading any of its properties.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Resolve endpoint and mTLS cert source from env vars + options.
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options)

        # An API key and explicit credentials cannot both be supplied.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError("client_options.api_key and credentials are mutually exclusive")

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, ModelServiceTransport):
            # transport is a ModelServiceTransport instance: it already owns
            # its credentials/scopes, so conflicting options are rejected.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Deferred import: get_api_key_credentials only exists in newer
            # google-auth versions, hence the hasattr probe below.
            import google.auth._default  # type: ignore

            if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"):
                credentials = google.auth._default.get_api_key_credentials(api_key_value)

            # Build the concrete transport ("grpc" / "grpc_asyncio") chosen
            # by the metaclass registry.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def upload_model(self, + request: Union[model_service.UploadModelRequest, dict] = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Uploads a Model artifact into Vertex AI. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_upload_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + parent (str): + Required. The resource name of the Location into which + to upload the Model. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UploadModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UploadModelRequest): + request = model_service.UploadModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.upload_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: Union[model_service.GetModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, + also provide the version ID or version alias. Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. 
The "default" version alias is + created for the first version of the model, and can be + moved to other versions later on. There will be exactly + one default version. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_models(self, + request: Union[model_service.ListModelsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists Models in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_models(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_versions(self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsPager: + r"""Lists versions of the specified model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_model_versions(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions]. + name (str): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelVersionsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelVersionsRequest): + request = model_service.ListModelVersionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_versions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelVersionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_model(self, + request: Union[model_service.UpdateModelRequest, dict] = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. 
For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateModelRequest): + request = model_service.UpdateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model(self, + request: Union[model_service.DeleteModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelRequest): + request = model_service.DeleteModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_version(self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_model_version(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1.ModelService.DeleteModelVersion]. + name (str): + Required. The name of the model version to be deleted, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelVersionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelVersionRequest): + request = model_service.DeleteModelVersionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model_version] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def merge_version_aliases(self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1.ModelService.MergeVersionAliases]. + name (str): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. 
Adding the ``-`` prefix
+                to an alias means removing that alias from the version.
+                ``-`` is NOT counted in the 128 characters. Example:
+                ``-golden`` means removing the ``golden`` alias from the
+                version.
+
+                There is NO ordering in aliases, which means
+
+                1) The aliases returned from GetModel API might not have
+                   exactly the same order as this MergeVersionAliases
+                   API. 2) Adding and deleting the same alias in the
+                   request is not recommended, and the 2 operations will
+                   be cancelled out.
+
+                This corresponds to the ``version_aliases`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.Model:
+                A trained machine learning Model.
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, version_aliases])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a model_service.MergeVersionAliasesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, model_service.MergeVersionAliasesRequest):
+            request = model_service.MergeVersionAliasesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+ if name is not None: + request.name = name + if version_aliases is not None: + request.version_aliases = version_aliases + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.merge_version_aliases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def export_model(self, + request: Union[model_service.ExportModelRequest, dict] = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_export_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + name (str): + Required. 
The resource name of the + Model to export. The resource name may + contain version id or version alias to + specify the version, if no version is + specified, the default version will be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, model_service.ExportModelRequest): + request = model_service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + def import_model_evaluation(self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (google.cloud.aiplatform_v1.types.ModelEvaluation): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ImportModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ImportModelEvaluationRequest): + request = model_service.ImportModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: Union[model_service.GetModelEvaluationRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + name (str): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
        # Minor optimization: skip the copy when the caller already passed the
        # correct proto-plus type (safe: the flattened-argument check ran above).
        if not isinstance(request, model_service.GetModelEvaluationRequest):
            request = model_service.GetModelEvaluationRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation]

        # Certain fields should be provided within the metadata header;
        # add these here.  The routing header tells the backend which
        # resource the request targets.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_model_evaluations(self,
            request: Union[model_service.ListModelEvaluationsRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListModelEvaluationsPager:
        r"""Lists ModelEvaluations in a Model.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_list_model_evaluations():
                # Create a client
                client = aiplatform_v1.ModelServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.ListModelEvaluationsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_model_evaluations(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]):
                The request object. Request message for
                [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations].
            parent (str):
                Required. The resource name of the Model to list the
                ModelEvaluations from. Format:
                ``projects/{project}/locations/{location}/models/{model}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager:
                Response message for
                [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a model_service.ListModelEvaluationsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, model_service.ListModelEvaluationsRequest):
            request = model_service.ListModelEvaluationsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method that lazily fetches further pages.
        response = pagers.ListModelEvaluationsPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_model_evaluation_slice(self,
            request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> model_evaluation_slice.ModelEvaluationSlice:
        r"""Gets a ModelEvaluationSlice.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_get_model_evaluation_slice():
                # Create a client
                client = aiplatform_v1.ModelServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.GetModelEvaluationSliceRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_model_evaluation_slice(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]):
                The request object. Request message for
                [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice].
            name (str):
                Required. The name of the ModelEvaluationSlice resource.
                Format:
                ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.ModelEvaluationSlice:
                A collection of metrics calculated by
                comparing Model's predictions on a slice
                of the test data against ground truth
                annotations.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a model_service.GetModelEvaluationSliceRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, model_service.GetModelEvaluationSliceRequest):
            request = model_service.GetModelEvaluationSliceRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_model_evaluation_slices(self,
            request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListModelEvaluationSlicesPager:
        r"""Lists ModelEvaluationSlices in a ModelEvaluation.

        .. code-block:: python

            from google.cloud import aiplatform_v1

            def sample_list_model_evaluation_slices():
                # Create a client
                client = aiplatform_v1.ModelServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1.ListModelEvaluationSlicesRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_model_evaluation_slices(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]):
                The request object. Request message for
                [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices].
            parent (str):
                Required. The resource name of the ModelEvaluation to
                list the ModelEvaluationSlices from. Format:
                ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager:
                Response message for
                [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a model_service.ListModelEvaluationSlicesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, model_service.ListModelEvaluationSlicesRequest):
            request = model_service.ListModelEvaluationSlicesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method that lazily fetches further pages.
        response = pagers.ListModelEvaluationSlicesPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        # Support `with ModelServiceClient() as client:` usage.
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # NOTE: parameter names follow the context-manager protocol;
        # ``type`` intentionally shadows the builtin here.
        self.transport.close()

    def list_operations(
        self,
        request: operations_pb2.ListOperationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.

        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for
                `ListOperations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.list_operations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def get_operation(
        self,
        request: operations_pb2.GetOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.

        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for
                `GetOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def delete_operation(
        self,
        request: operations_pb2.DeleteOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes a long-running operation.

        This method indicates that the client is no longer interested
        in the operation result. It does not cancel the operation.
        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.DeleteOperationRequest`):
                The request object. Request message for
                `DeleteOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.delete_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.  No payload is returned for a delete.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for
                `CancelOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.cancel_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.  No payload is returned for a cancel.
        rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    def wait_operation(
        self,
        request: operations_pb2.WaitOperationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Waits until the specified long-running operation is done or reaches at most
        a specified timeout, returning the latest state.

        If the operation is already done, the latest state is immediately returned.
        If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
        timeout is used. If the server does not support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.

        Args:
            request (:class:`~.operations_pb2.WaitOperationRequest`):
                The request object. Request message for
                `WaitOperation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.WaitOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.wait_operation,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def set_iam_policy(
        self,
        request: iam_policy_pb2.SetIamPolicyRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> policy_pb2.Policy:
        r"""Sets the IAM access control policy on the specified resource.

        Replaces any existing policy.

        Args:
            request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
                The request object. Request message for `SetIamPolicy`
                method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy:
                a collection of ``bindings``, each binding one or more
                ``members`` (user accounts, service accounts, groups,
                domains) to a single ``role``, optionally constrained by
                a ``condition`` expression. For a description of IAM and
                its features, see the IAM developer's guide:
                https://cloud.google.com/iam/docs
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.SetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.set_iam_policy,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM methods route on ``resource``, not ``name``.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def get_iam_policy(
        self,
        request: iam_policy_pb2.GetIamPolicyRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> policy_pb2.Policy:
        r"""Gets the IAM access control policy for a resource.

        Returns an empty policy if the resource exists and does not have a
        policy set.

        Args:
            request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
                The request object. Request message for `GetIamPolicy`
                method.
            retry (google.api_core.retry.Retry): Designation of what errors, if
                any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy:
                a collection of ``bindings``, each binding one or more
                ``members`` (user accounts, service accounts, groups,
                domains) to a single ``role``, optionally constrained by
                a ``condition`` expression. For a description of IAM and
                its features, see the IAM developer's guide:
                https://cloud.google.com/iam/docs
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.GetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_iam_policy,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM methods route on ``resource``, not ``name``.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def test_iam_permissions(
        self,
        request: iam_policy_pb2.TestIamPermissionsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> iam_policy_pb2.TestIamPermissionsResponse:
        r"""Tests the specified IAM permissions against the IAM access control
        policy for a resource.

        If the resource does not exist, this will return an empty set
        of permissions, not a NOT_FOUND error.

        Args:
            request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
                The request object. Request message for
                `TestIamPermissions` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.iam_policy_pb2.TestIamPermissionsResponse:
                Response message for ``TestIamPermissions`` method.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.TestIamPermissionsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.test_iam_permissions,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM methods route on ``resource``, not ``name``.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def get_location(
        self,
        request: locations_pb2.GetLocationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.Location:
        r"""Gets information about a location.

        Args:
            request (:class:`~.location_pb2.GetLocationRequest`):
                The request object. Request message for
                `GetLocation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.location_pb2.Location:
                Location object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.GetLocationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_location,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def list_locations(
        self,
        request: locations_pb2.ListLocationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.ListLocationsResponse:
        r"""Lists information about the supported locations for this service.

        Args:
            request (:class:`~.location_pb2.ListLocationsRequest`):
                The request object. Request message for
                `ListLocations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.location_pb2.ListLocationsResponse:
                Response message for ``ListLocations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.ListLocationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._transport.list_locations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

# Resolve the installed distribution's version for the client-info
# (``x-goog-api-client``) header; fall back to a bare ClientInfo when the
# package is not installed (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "ModelServiceClient",
)
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py
new file mode 100644
index 0000000000..8f85c4c65b
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py
@@ -0,0 +1,505 @@
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation_slice
from google.cloud.aiplatform_v1.types import model_service


class ListModelsPager:
    """A pager for iterating through ``list_models`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``models`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListModels`` requests and continue to iterate
    through the ``models`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., model_service.ListModelsResponse],
            request: model_service.ListModelsRequest,
            response: model_service.ListModelsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListModelsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListModelsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so page_token can be mutated without touching the caller's
        # original request object.
        self._request = model_service.ListModelsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[model_service.ListModelsResponse]:
        # Yield the current page, then lazily fetch subsequent pages until
        # the server returns an empty next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[model.Model]:
        # Flatten all pages into a single stream of Model items.
        for page in self.pages:
            yield from page.models

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListModelsAsyncPager:
    """A pager for iterating through ``list_models`` requests.
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelVersionsPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelVersionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelVersionsResponse], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelVersionsAsyncPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelVersionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelVersionsResponse]], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluations`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationsResponse], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationSlicesResponse], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: + for page in self.pages: + yield from page.model_evaluation_slices + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesAsyncPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation_slices: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py new file mode 100644 index 0000000000..70c39ff384 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry['grpc'] = ModelServiceGrpcTransport +_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport + +__all__ = ( + 'ModelServiceTransport', + 'ModelServiceGrpcTransport', + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py new file mode 100644 index 0000000000..4daa4b51f0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -0,0 +1,439 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model as gca_model
from google.cloud.aiplatform_v1.types import model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation_slice
from google.cloud.aiplatform_v1.types import model_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore

# Report the installed library version in the user-agent; fall back to a
# bare ClientInfo when the distribution metadata is unavailable (e.g. when
# running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class ModelServiceTransport(abc.ABC):
    """Abstract transport class for ModelService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties
    declared below; this base class only resolves credentials and wraps
    methods with retry/timeout metadata.
    """

    # OAuth scopes requested when credentials are resolved from the
    # environment.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (Self-signed JWTs avoid a round trip to the token endpoint; the
        # hasattr guard keeps compatibility with older google-auth releases.)
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the wrapped methods, attaching retry/timeout defaults."""
        self._wrapped_methods = {
            self.upload_model: gapic_v1.method.wrap_method(
                self.upload_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_model: gapic_v1.method.wrap_method(
                self.get_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_models: gapic_v1.method.wrap_method(
                self.list_models,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_model_versions: gapic_v1.method.wrap_method(
                self.list_model_versions,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_model: gapic_v1.method.wrap_method(
                self.update_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_model: gapic_v1.method.wrap_method(
                self.delete_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_model_version: gapic_v1.method.wrap_method(
                self.delete_model_version,
                default_timeout=None,
                client_info=client_info,
            ),
            self.merge_version_aliases: gapic_v1.method.wrap_method(
                self.merge_version_aliases,
                default_timeout=None,
                client_info=client_info,
            ),
            self.export_model: gapic_v1.method.wrap_method(
                self.export_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.import_model_evaluation: gapic_v1.method.wrap_method(
                self.import_model_evaluation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_model_evaluation: gapic_v1.method.wrap_method(
                self.get_model_evaluation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_model_evaluations: gapic_v1.method.wrap_method(
                self.list_model_evaluations,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_model_evaluation_slice: gapic_v1.method.wrap_method(
                self.get_model_evaluation_slice,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_model_evaluation_slices: gapic_v1.method.wrap_method(
                self.list_model_evaluation_slices,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # ----------------------------------------------------------------------
    # ModelService RPCs. Each property returns the transport's callable for
    # that RPC (sync or awaitable, depending on the concrete subclass).
    # ----------------------------------------------------------------------

    @property
    def upload_model(self) -> Callable[
            [model_service.UploadModelRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get_model(self) -> Callable[
            [model_service.GetModelRequest],
            Union[
                model.Model,
                Awaitable[model.Model]
            ]]:
        raise NotImplementedError()

    @property
    def list_models(self) -> Callable[
            [model_service.ListModelsRequest],
            Union[
                model_service.ListModelsResponse,
                Awaitable[model_service.ListModelsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_model_versions(self) -> Callable[
            [model_service.ListModelVersionsRequest],
            Union[
                model_service.ListModelVersionsResponse,
                Awaitable[model_service.ListModelVersionsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_model(self) -> Callable[
            [model_service.UpdateModelRequest],
            Union[
                gca_model.Model,
                Awaitable[gca_model.Model]
            ]]:
        raise NotImplementedError()

    @property
    def delete_model(self) -> Callable[
            [model_service.DeleteModelRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def delete_model_version(self) -> Callable[
            [model_service.DeleteModelVersionRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def merge_version_aliases(self) -> Callable[
            [model_service.MergeVersionAliasesRequest],
            Union[
                model.Model,
                Awaitable[model.Model]
            ]]:
        raise NotImplementedError()

    @property
    def export_model(self) -> Callable[
            [model_service.ExportModelRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def import_model_evaluation(self) -> Callable[
            [model_service.ImportModelEvaluationRequest],
            Union[
                gca_model_evaluation.ModelEvaluation,
                Awaitable[gca_model_evaluation.ModelEvaluation]
            ]]:
        raise NotImplementedError()

    @property
    def get_model_evaluation(self) -> Callable[
            [model_service.GetModelEvaluationRequest],
            Union[
                model_evaluation.ModelEvaluation,
                Awaitable[model_evaluation.ModelEvaluation]
            ]]:
        raise NotImplementedError()

    @property
    def list_model_evaluations(self) -> Callable[
            [model_service.ListModelEvaluationsRequest],
            Union[
                model_service.ListModelEvaluationsResponse,
                Awaitable[model_service.ListModelEvaluationsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def get_model_evaluation_slice(self) -> Callable[
            [model_service.GetModelEvaluationSliceRequest],
            Union[
                model_evaluation_slice.ModelEvaluationSlice,
                Awaitable[model_evaluation_slice.ModelEvaluationSlice]
            ]]:
        raise NotImplementedError()

    @property
    def list_model_evaluation_slices(self) -> Callable[
            [model_service.ListModelEvaluationSlicesRequest],
            Union[
                model_service.ListModelEvaluationSlicesResponse,
                Awaitable[model_service.ListModelEvaluationSlicesResponse]
            ]]:
        raise NotImplementedError()

    # ----------------------------------------------------------------------
    # Mixin RPCs: long-running operations, IAM, and locations.
    # ----------------------------------------------------------------------

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short name identifying the concrete transport ('grpc', ...).
        raise NotImplementedError()


__all__ = (
    'ModelServiceTransport',
)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. 
+ Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. 
+ + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def list_model_versions(self) -> Callable[ + [model_service.ListModelVersionsRequest], + model_service.ListModelVersionsResponse]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + ~.ListModelVersionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_versions' not in self._stubs: + self._stubs['list_model_versions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelVersions', + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs['list_model_versions'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. 
+ + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def delete_model_version(self) -> Callable[ + [model_service.DeleteModelVersionRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] + for deleting the Model instead. + + Returns: + Callable[[~.DeleteModelVersionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model_version' not in self._stubs: + self._stubs['delete_model_version'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModelVersion', + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_version'] + + @property + def merge_version_aliases(self) -> Callable[ + [model_service.MergeVersionAliasesRequest], + model.Model]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'merge_version_aliases' not in self._stubs: + self._stubs['merge_version_aliases'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/MergeVersionAliases', + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['merge_version_aliases'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def import_model_evaluation(self) -> Callable[ + [model_service.ImportModelEvaluationRequest], + gca_model_evaluation.ModelEvaluation]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. 
+ + Returns: + Callable[[~.ImportModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_model_evaluation' not in self._stubs: + self._stubs['import_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ImportModelEvaluation', + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['import_model_evaluation'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + model_service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. 
+ + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + model_evaluation_slice.ModelEvaluationSlice]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + ~.ModelEvaluationSlice]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + model_service.ListModelEvaluationSlicesResponse]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + ~.ListModelEvaluationSlicesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'ModelServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..0a47f38dc7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,848 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Awaitable[model_service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def list_model_versions(self) -> Callable[ + [model_service.ListModelVersionsRequest], + Awaitable[model_service.ListModelVersionsResponse]]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + Awaitable[~.ListModelVersionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_versions' not in self._stubs: + self._stubs['list_model_versions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelVersions', + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs['list_model_versions'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Awaitable[gca_model.Model]]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. + + Returns: + Callable[[~.DeleteModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def delete_model_version(self) -> Callable[ + [model_service.DeleteModelVersionRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] + for deleting the Model instead. 
+ + Returns: + Callable[[~.DeleteModelVersionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model_version' not in self._stubs: + self._stubs['delete_model_version'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModelVersion', + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_version'] + + @property + def merge_version_aliases(self) -> Callable[ + [model_service.MergeVersionAliasesRequest], + Awaitable[model.Model]]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'merge_version_aliases' not in self._stubs: + self._stubs['merge_version_aliases'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/MergeVersionAliases', + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['merge_version_aliases'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable Model to a location specified by + the user. 
A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def import_model_evaluation(self) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Awaitable[gca_model_evaluation.ModelEvaluation]]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'import_model_evaluation' not in self._stubs: + self._stubs['import_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ImportModelEvaluation', + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['import_model_evaluation'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Awaitable[model_evaluation.ModelEvaluation]]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Awaitable[model_service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + Awaitable[~.ModelEvaluationSlice]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + Awaitable[model_service.ListModelEvaluationSlicesResponse]]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. 
+ + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + Awaitable[~.ListModelEvaluationSlicesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py new file mode 100644 index 0000000000..88e994d789 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import PipelineServiceClient +from .async_client import PipelineServiceAsyncClient + +__all__ = ( + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py new file mode 100644 index 0000000000..40564c1949 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -0,0 +1,1953 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, 
DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport +from .client import PipelineServiceClient + + +class PipelineServiceAsyncClient: + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + """ + + _client: PipelineServiceClient + + DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) + endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) + model_path = staticmethod(PipelineServiceClient.model_path) + parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) + training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) + common_billing_account_path = 
staticmethod(PipelineServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PipelineServiceClient.common_project_path) + parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) + common_location_path = staticmethod(PipelineServiceClient.common_location_path) + parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. + """ + return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. 
+ """ + return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PipelineServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PipelineServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_training_pipeline(self, + request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. + parent (:class:`str`): + Required. The resource name of the Location to create + the TrainingPipeline in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreateTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_training_pipeline, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_training_pipeline(self, + request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = await client.get_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_training_pipeline, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_training_pipelines(self, + request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesAsyncPager: + r"""Lists TrainingPipelines in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + parent (:class:`str`): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListTrainingPipelinesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_training_pipelines, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrainingPipelinesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_training_pipeline(self, + request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TrainingPipeline. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeleteTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_training_pipeline, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_training_pipeline(self, + request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + await client.cancel_training_pipeline(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_training_pipeline, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_pipeline_job(self, + request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_pipeline_job(self, + request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_pipeline_jobs(self, + request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_pipeline_job(self, + request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job(self, + request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. 
Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_pipeline_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py new file mode 100644 index 0000000000..3ada360c16 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -0,0 +1,2219 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import 
struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PipelineServiceGrpcTransport +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +class PipelineServiceClientMeta(type): + """Metaclass for the PipelineService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] + _transport_registry["grpc"] = PipelineServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PipelineServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PipelineServiceClient(metaclass=PipelineServiceClientMeta): + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
+ Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str,str]: + """Parses a artifact path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str,str]: + """Parses a context path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + 
@staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str,str]: + """Parses a execution path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> str: + """Returns a 
fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str: + """Returns a fully-qualified pipeline_job string.""" + return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) + + @staticmethod + def parse_pipeline_job_path(path: str) -> Dict[str,str]: + """Parses a pipeline_job path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: + """Returns a fully-qualified training_pipeline string.""" + return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + + @staticmethod + def parse_training_pipeline_path(path: str) -> Dict[str,str]: + """Parses a training_pipeline path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return 
m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. 
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PipelineServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PipelineServiceTransport): + # transport is a PipelineServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_training_pipeline(self, + request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]): + The request object. 
Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. + parent (str): + Required. The resource name of the Location to create + the TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreateTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): + request = pipeline_service.CreateTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_training_pipeline(self, + request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = client.get_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource. 
+ Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): + request = pipeline_service.GetTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_training_pipelines(self, + request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesPager: + r"""Lists TrainingPipelines in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListTrainingPipelinesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): + request = pipeline_service.ListTrainingPipelinesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListTrainingPipelinesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_training_pipeline(self, + request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeleteTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): + request = pipeline_service.DeleteTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_training_pipeline(self, + request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + client.cancel_training_pipeline(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + name (str): + Required. 
The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): + request = pipeline_service.CancelTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_pipeline_job(self, + request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. 
+ + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_pipeline_job(self, + request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_pipeline_jobs(self, + request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job(self, + request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_pipeline_job(self, + request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. 
The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_pipeline_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. 
Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. 
+ """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py new file mode 100644 index 0000000000..6652912104 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline + + +class ListTrainingPipelinesPager: + """A pager for iterating through ``list_training_pipelines`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``training_pipelines`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[training_pipeline.TrainingPipeline]: + for page in self.pages: + yield from page.training_pipelines + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrainingPipelinesAsyncPager: + """A pager for iterating through ``list_training_pipelines`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``training_pipelines`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[training_pipeline.TrainingPipeline]: + async def async_generator(): + async for page in self.pages: + for response in page.training_pipelines: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py new file mode 100644 index 0000000000..da3e9606ee --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PipelineServiceTransport +from .grpc import PipelineServiceGrpcTransport +from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] +_transport_registry['grpc'] = PipelineServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport + +__all__ = ( + 'PipelineServiceTransport', + 'PipelineServiceGrpcTransport', + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py new file mode 100644 index 0000000000..2c4c785f4a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PipelineServiceTransport(abc.ABC): + """Abstract transport class for PipelineService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: 
Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_training_pipeline: gapic_v1.method.wrap_method( + self.create_training_pipeline, + default_timeout=None, + client_info=client_info, + ), + self.get_training_pipeline: gapic_v1.method.wrap_method( + self.get_training_pipeline, + default_timeout=None, + client_info=client_info, + ), + self.list_training_pipelines: gapic_v1.method.wrap_method( + self.list_training_pipelines, + default_timeout=None, + client_info=client_info, + ), + self.delete_training_pipeline: gapic_v1.method.wrap_method( + self.delete_training_pipeline, + default_timeout=None, + client_info=client_info, + ), + self.cancel_training_pipeline: gapic_v1.method.wrap_method( + self.cancel_training_pipeline, + default_timeout=None, + client_info=client_info, + ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.list_pipeline_jobs: 
gapic_v1.method.wrap_method( + self.list_pipeline_jobs, + default_timeout=None, + client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Union[ + gca_training_pipeline.TrainingPipeline, + Awaitable[gca_training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Union[ + training_pipeline.TrainingPipeline, + Awaitable[training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Union[ + pipeline_service.ListTrainingPipelinesResponse, + Awaitable[pipeline_service.ListTrainingPipelinesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + 
@property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Union[ + gca_pipeline_job.PipelineJob, + Awaitable[gca_pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Union[ + pipeline_job.PipelineJob, + Awaitable[pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Union[ + pipeline_service.ListPipelineJobsResponse, + Awaitable[pipeline_service.ListPipelineJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, 
Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'PipelineServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py new file mode 100644 index 0000000000..34e7e90704 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -0,0 +1,758 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO + + +class PipelineServiceGrpcTransport(PipelineServiceTransport): + """gRPC backend transport for PipelineService. + + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + gca_training_pipeline.TrainingPipeline]: + r"""Return a callable for the create training pipeline method over gRPC. + + Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Returns: + Callable[[~.CreateTrainingPipelineRequest], + ~.TrainingPipeline]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_training_pipeline' not in self._stubs: + self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', + request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, + response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['create_training_pipeline'] + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + training_pipeline.TrainingPipeline]: + r"""Return a callable for the get training pipeline method over gRPC. + + Gets a TrainingPipeline. + + Returns: + Callable[[~.GetTrainingPipelineRequest], + ~.TrainingPipeline]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_training_pipeline' not in self._stubs: + self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', + request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, + response_deserializer=training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['get_training_pipeline'] + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + pipeline_service.ListTrainingPipelinesResponse]: + r"""Return a callable for the list training pipelines method over gRPC. + + Lists TrainingPipelines in a Location. + + Returns: + Callable[[~.ListTrainingPipelinesRequest], + ~.ListTrainingPipelinesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + gca_pipeline_job.PipelineJob]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(review): guard must check the key this property caches under
+        # ("wait_operation"), not "delete_operation" — otherwise, after
+        # delete_operation has populated its own stub, this branch is skipped
+        # and the return below raises KeyError.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PipelineServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..ed9fff3cd8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -0,0 +1,757 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PipelineServiceGrpcTransport + + +class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): + """gRPC AsyncIO backend transport for PipelineService. + + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Awaitable[gca_training_pipeline.TrainingPipeline]]: + r"""Return a callable for the create training pipeline method over gRPC. + + Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Returns: + Callable[[~.CreateTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_training_pipeline' not in self._stubs: + self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', + request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, + response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['create_training_pipeline'] + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Awaitable[training_pipeline.TrainingPipeline]]: + r"""Return a callable for the get training pipeline method over gRPC. + + Gets a TrainingPipeline. + + Returns: + Callable[[~.GetTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_training_pipeline' not in self._stubs: + self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', + request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, + response_deserializer=training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['get_training_pipeline'] + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: + r"""Return a callable for the list training pipelines method over gRPC. + + Lists TrainingPipelines in a Location. + + Returns: + Callable[[~.ListTrainingPipelinesRequest], + Awaitable[~.ListTrainingPipelinesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob]]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Awaitable[pipeline_job.PipelineJob]]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse]]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py new file mode 100644 index 0000000000..7c079c80cc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py new file mode 100644 index 0000000000..060d126371 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -0,0 +1,1316 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """A service for online predictions and explanations.""" + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + model_path = staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) + common_billing_account_path = 
staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. 
+ """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: Union[prediction_service.PredictRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_predict(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + prediction call. 
A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def raw_predict(self, + request: Union[prediction_service.RawPredictRequest, dict] = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_raw_predict(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = await client.raw_predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (:class:`google.api.httpbody_pb2.HttpBody`): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1.Model]. This schema + applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1.Endpoint] and use + the ``RawPredict`` method. 
+ + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.RawPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.raw_predict, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def explain(self, + request: Union[prediction_service.ExplainRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. 
If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_explain(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.explain(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]): + The request object. Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. 
The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.ExplainRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.explain, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py new file mode 100644 index 0000000000..0ee5fca048 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -0,0 +1,1519 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PredictionServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + + class PredictionServiceClient(metaclass=PredictionServiceClientMeta): + """A service for online predictions and explanations.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses an endpoint path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a
fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse an organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PredictionServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def predict(self, + request: Union[prediction_service.PredictRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_predict(): + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances.extend(instances) + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def raw_predict(self, + request: Union[prediction_service.RawPredictRequest, dict] = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_raw_predict(): + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = client.raw_predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. 
When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1.Model]. This schema + applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1.Endpoint] and use + the ``RawPredict`` method. + + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. 
+ google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.RawPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.RawPredictRequest): + request = prediction_service.RawPredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.raw_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def explain(self, + request: Union[prediction_service.ExplainRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_explain(): + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.explain(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]): + The request object. Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the explanation. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.ExplainRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.ExplainRequest): + request = prediction_service.ExplainRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances.extend(instances) + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.explain] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. 
+ + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. 
If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. 
+ + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py new file mode 100644 index 0000000000..bb65c664b5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py new file mode 100644 index 0000000000..587c863566 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, + default_timeout=None, + client_info=client_info, + ), + self.raw_predict: gapic_v1.method.wrap_method( + self.raw_predict, + default_timeout=None, + client_info=client_info, + ), + self.explain: gapic_v1.method.wrap_method( + self.explain, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Union[ + prediction_service.PredictResponse, + Awaitable[prediction_service.PredictResponse] + ]]: + raise NotImplementedError() + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + Union[ + httpbody_pb2.HttpBody, + Awaitable[httpbody_pb2.HttpBody] + ]]: + raise NotImplementedError() + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + Union[ + prediction_service.ExplainResponse, + Awaitable[prediction_service.ExplainResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'PredictionServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py new file mode 100644 index 0000000000..0011e05cfd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -0,0 +1,545 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. + + Returns: + Callable[[~.PredictRequest], + ~.PredictResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + httpbody_pb2.HttpBody]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. + + Returns: + Callable[[~.RawPredictRequest], + ~.HttpBody]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'raw_predict' not in self._stubs: + self._stubs['raw_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/RawPredict', + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs['raw_predict'] + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + prediction_service.ExplainResponse]: + r"""Return a callable for the explain method over gRPC. + + Perform an online explanation. 
+ + If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Returns: + Callable[[~.ExplainRequest], + ~.ExplainResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'explain' not in self._stubs: + self._stubs['explain'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Explain', + request_serializer=prediction_service.ExplainRequest.serialize, + response_deserializer=prediction_service.ExplainResponse.deserialize, + ) + return self._stubs['explain'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..541c1ea6af --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Awaitable[prediction_service.PredictResponse]]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. + + Returns: + Callable[[~.PredictRequest], + Awaitable[~.PredictResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + Awaitable[httpbody_pb2.HttpBody]]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. 
+ + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + that served this prediction. + + Returns: + Callable[[~.RawPredictRequest], + Awaitable[~.HttpBody]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'raw_predict' not in self._stubs: + self._stubs['raw_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/RawPredict', + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs['raw_predict'] + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + Awaitable[prediction_service.ExplainResponse]]: + r"""Return a callable for the explain method over gRPC. + + Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Returns: + Callable[[~.ExplainRequest], + Awaitable[~.ExplainResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'explain' not in self._stubs: + self._stubs['explain'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Explain', + request_serializer=prediction_service.ExplainRequest.serialize, + response_deserializer=prediction_service.ExplainResponse.deserialize, + ) + return self._stubs['explain'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py new file mode 100644 index 0000000000..9fb268e877 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import SpecialistPoolServiceClient +from .async_client import SpecialistPoolServiceAsyncClient + +__all__ = ( + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py new file mode 100644 index 0000000000..9e2ae4b9d6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -0,0 +1,1452 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport +from .client import SpecialistPoolServiceClient + + +class SpecialistPoolServiceAsyncClient: + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. 
Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + _client: SpecialistPoolServiceClient + + DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT + + specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) + parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) + common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) + common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) + parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) + common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) + parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceAsyncClient: The constructed client. 
+ """ + return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceAsyncClient: The constructed client. + """ + return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return SpecialistPoolServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> SpecialistPoolServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SpecialistPoolServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpecialistPoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = SpecialistPoolServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_specialist_pool(self, + request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. + parent (:class:`str`): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_specialist_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_specialist_pool(self, + request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. + name (:class:`str`): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. 
It includes a group of + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.GetSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_specialist_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_specialist_pools(self, + request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsAsyncPager: + r"""Lists SpecialistPools in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + parent (:class:`str`): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_specialist_pools, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSpecialistPoolsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_specialist_pool(self, + request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. + name (:class:`str`): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_specialist_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_specialist_pool(self, + request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): + Required. The SpecialistPool which + replaces the resource on the server. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_specialist_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py new file mode 100644 index 0000000000..757a9e3225 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -0,0 +1,1646 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SpecialistPoolServiceGrpcTransport +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +class SpecialistPoolServiceClientMeta(type): + 
"""Metaclass for the SpecialistPoolService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] + _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport + _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SpecialistPoolServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> SpecialistPoolServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            SpecialistPoolServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str:
+        """Returns a fully-qualified specialist_pool string."""
+        return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, )
+
+    @staticmethod
+    def parse_specialist_pool_path(path: str) -> Dict[str,str]:
+        """Parses a specialist_pool path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/specialistPools/(?P<specialist_pool>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m
 else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SpecialistPoolServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SpecialistPoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpecialistPoolServiceTransport): + # transport is a SpecialistPoolServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_specialist_pool(self, + request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. 
+ parent (str): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.CreateSpecialistPoolRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + def get_specialist_pool(self, + request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. + name (str): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.GetSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): + request = specialist_pool_service.GetSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_specialist_pools(self, + request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsPager: + r"""Lists SpecialistPools in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + parent (str): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.ListSpecialistPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSpecialistPoolsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_specialist_pool(self, + request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. + name (str): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.DeleteSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def update_specialist_pool(self, + request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): + Required. The SpecialistPool which + replaces the resource on the server. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.UpdateSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
# Attach the installed google-cloud-aiplatform version to outgoing requests;
# fall back to an anonymous ClientInfo when the distribution metadata is not
# available (e.g. when running from a source checkout).
try:
    _gapic_version = pkg_resources.get_distribution(
        "google-cloud-aiplatform",
    ).version
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=_gapic_version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "SpecialistPoolServiceClient",
)
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: + for page in self.pages: + yield from page.specialist_pools + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSpecialistPoolsAsyncPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: + async def async_generator(): + async for page in self.pages: + for response in page.specialist_pools: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py new file mode 100644 index 0000000000..8e501c9548 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpecialistPoolServiceTransport +from .grpc import SpecialistPoolServiceGrpcTransport +from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] +_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport +_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport + +__all__ = ( + 'SpecialistPoolServiceTransport', + 'SpecialistPoolServiceGrpcTransport', + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py new file mode 100644 index 0000000000..ca33800dc1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class SpecialistPoolServiceTransport(abc.ABC): + """Abstract transport class for SpecialistPoolService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_specialist_pool: gapic_v1.method.wrap_method( + self.create_specialist_pool, + default_timeout=None, + client_info=client_info, + ), + self.get_specialist_pool: gapic_v1.method.wrap_method( + self.get_specialist_pool, + default_timeout=None, + client_info=client_info, + ), + self.list_specialist_pools: gapic_v1.method.wrap_method( + self.list_specialist_pools, + default_timeout=None, + client_info=client_info, + ), + self.delete_specialist_pool: gapic_v1.method.wrap_method( + self.delete_specialist_pool, + default_timeout=None, + client_info=client_info, + ), + self.update_specialist_pool: gapic_v1.method.wrap_method( + self.update_specialist_pool, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Union[ + specialist_pool.SpecialistPool, + Awaitable[specialist_pool.SpecialistPool] + ]]: + raise NotImplementedError() + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Union[ + specialist_pool_service.ListSpecialistPoolsResponse, + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + 
def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'SpecialistPoolServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py new file mode 100644 index 0000000000..0f69cf6b14 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -0,0 +1,601 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO + + +class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): + """gRPC backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + specialist_pool.SpecialistPool]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + ~.SpecialistPool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + specialist_pool_service.ListSpecialistPoolsResponse]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + ~.ListSpecialistPoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'SpecialistPoolServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a02883a2e2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -0,0 +1,600 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import SpecialistPoolServiceGrpcTransport + + +class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): + """gRPC AsyncIO backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Awaitable[specialist_pool.SpecialistPool]]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + Awaitable[~.SpecialistPool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + Awaitable[~.ListSpecialistPoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. 
+ + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py new file mode 100644 index 0000000000..233bb9e47a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import TensorboardServiceClient +from .async_client import TensorboardServiceAsyncClient + +__all__ = ( + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py new file mode 100644 index 0000000000..588f39bac1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -0,0 +1,3998 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_data +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import tensorboard_time_series +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning 
import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport +from .client import TensorboardServiceClient + + +class TensorboardServiceAsyncClient: + """TensorboardService""" + + _client: TensorboardServiceClient + + DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT + + tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) + tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) + parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) + tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) + parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) + tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) + parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) + common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(TensorboardServiceClient.parse_common_organization_path) + 
common_project_path = staticmethod(TensorboardServiceClient.common_project_path) + parse_common_project_path = staticmethod(TensorboardServiceClient.parse_common_project_path) + common_location_path = staticmethod(TensorboardServiceClient.common_location_path) + parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return TensorboardServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> TensorboardServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the tensorboard service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = TensorboardServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_tensorboard(self, + request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (:class:`google.cloud.aiplatform_v1.types.Tensorboard`): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard(self, + request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. 
If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_tensorboard(self, + request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a Tensorboard. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. + tensorboard (:class:`google.cloud.aiplatform_v1.types.Tensorboard`): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_tensorboards(self, + request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsAsyncPager: + r"""Lists Tensorboards in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + parent (:class:`str`): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboards, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard(self, + request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_tensorboard_experiment(self, + request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = await client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1.types.TensorboardExperiment`): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (:class:`str`): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_experiment(self, + request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_tensorboard_experiment(self, + request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = await client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1.types.TensorboardExperiment`): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. 
A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_experiments(self, + request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsAsyncPager: + r"""Lists TensorboardExperiments in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + parent (:class:`str`): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_experiments, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardExperimentsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard_experiment(self, + request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_tensorboard_run(self, + request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = await client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (:class:`google.cloud.aiplatform_v1.types.TensorboardRun`): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ tensorboard_run_id (:class:`str`): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_create_tensorboard_runs(self, + request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. 
+ parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]`): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_run(self, + request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun resource. 
+ Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_tensorboard_run(self, + request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = await client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (:class:`google.cloud.aiplatform_v1.types.TensorboardRun`): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. 
A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run.name", request.tensorboard_run.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_runs(self, + request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsAsyncPager: + r"""Lists TensorboardRuns in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. + parent (:class:`str`): + Required. The resource name of the + TensorboardExperiment to list + TensorboardRuns. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardRunsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard_run(self, + request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def batch_create_tensorboard_time_series(self, + request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: + r"""Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest() + requests.parent = "parent_value" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in + the CreateTensorboardTimeSeriesRequest messages must be + sub resources of this TensorboardExperiment. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]`): + Required. The request message + specifying the TensorboardTimeSeries to + create. A maximum of 1000 + TensorboardTimeSeries can be created in + a batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse: + Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_tensorboard_time_series(self, + request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Creates a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. 
+            parent (:class:`str`):
+                Required. The resource name of the TensorboardRun to
+                create the TensorboardTimeSeries in. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            tensorboard_time_series (:class:`google.cloud.aiplatform_v1.types.TensorboardTimeSeries`):
+                Required. The TensorboardTimeSeries
+                to create.
+
+                This corresponds to the ``tensorboard_time_series`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, tensorboard_time_series])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if tensorboard_time_series is not None:
+            request.tensorboard_time_series = tensorboard_time_series
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_time_series(self, + request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. + name (:class:`str`): + Required. The name of the TensorboardTimeSeries + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.GetTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_tensorboard_time_series,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+ return response + + async def update_tensorboard_time_series(self, + request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_update_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.update_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (:class:`google.cloud.aiplatform_v1.types.TensorboardTimeSeries`): + Required. The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+                Required. Field mask is used to specify the fields to be
+                overwritten in the TensorboardTimeSeries resource by the
+                update. The fields specified in the update_mask are
+                relative to the resource, not the full request. A field
+                will be overwritten if it is in the mask. If the user
+                does not provide a mask then all fields will be
+                overwritten if new values are specified.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_time_series, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if tensorboard_time_series is not None:
+            request.tensorboard_time_series = tensorboard_time_series
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series.name", request.tensorboard_time_series.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_time_series(self, + request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesAsyncPager: + r"""Lists TensorboardTimeSeries in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardTimeSeriesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard_time_series(self, + request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. + name (:class:`str`): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: + r"""Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_batch_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="tensorboard_value", + time_series=['time_series_value_1', 'time_series_value_2'], + ) + + # Make the request + response = await client.batch_read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + tensorboard (:class:`str`): + Required. 
The resource name of the Tensorboard + containing TensorboardTimeSeries to read data from. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. + The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard", request.tensorboard), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = await client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]): + The request object. 
Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_blob_data(self, + request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, + *, + time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]: + r"""Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = await client.read_tensorboard_blob_data(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_blob_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("time_series", request.time_series), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def write_tensorboard_experiment_data(self, + request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = await client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. + tensorboard_experiment (:class:`str`): + Required. 
The resource name of the TensorboardExperiment + to write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]`): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests: + request.write_run_data_requests.extend(write_run_data_requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_experiment_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment", request.tensorboard_experiment), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def write_tensorboard_run_data(self, + request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = await client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (:class:`str`): + Required. The resource name of the TensorboardRun to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series_data (:class:`Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]`): + Required. The TensorboardTimeSeries + data to write. Values with in a time + series are indexed by their step value. + Repeated writes to the same step will + overwrite the existing value for that + step. + The upper limit of data points per write + request is 5000. + + This corresponds to the ``time_series_data`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse: + Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data: + request.time_series_data.extend(time_series_data) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_run_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run", request.tensorboard_run), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def export_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py new file mode 100644 index 0000000000..2e6e949950 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py @@ -0,0 +1,4219 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_data +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import tensorboard_time_series +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from 
google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import TensorboardServiceGrpcTransport +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +class TensorboardServiceClientMeta(type): + """Metaclass for the TensorboardService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] + _transport_registry["grpc"] = TensorboardServiceGrpcTransport + _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TensorboardServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): + """TensorboardService""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ TensorboardServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ TensorboardServiceClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: + """Returns a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str,str]: + """Parses a tensorboard path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_experiment_path(project: str,location: str,tensorboard: str,experiment: str,) -> str: + """Returns a fully-qualified tensorboard_experiment string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) + + @staticmethod + def parse_tensorboard_experiment_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_experiment path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_run_path(project: str,location: str,tensorboard: str,experiment: str,run: str,) -> str: + """Returns a fully-qualified tensorboard_run string.""" + return 
"projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) + + @staticmethod + def parse_tensorboard_run_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_run path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_time_series_path(project: str,location: str,tensorboard: str,experiment: str,run: str,time_series: str,) -> str: + """Returns a fully-qualified tensorboard_time_series string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + + @staticmethod + def parse_tensorboard_time_series_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_time_series path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def 
parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+ # Figure out the client cert source to use.
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TensorboardServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the tensorboard service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TensorboardServiceTransport): + # transport is a TensorboardServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_tensorboard(self, + request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard]. + parent (str): + Required. The resource name of the Location to create + the Tensorboard in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRequest): + request = tensorboard_service.CreateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def get_tensorboard(self, + request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. + name (str): + Required. The name of the Tensorboard resource. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRequest): + request = tensorboard_service.GetTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard(self, + request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. + tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): + request = tensorboard_service.UpdateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def list_tensorboards(self, + request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsPager: + r"""Lists Tensorboards in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + parent (str): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardsRequest): + request = tensorboard_service.ListTensorboardsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard(self, + request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Tensorboard. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. + name (str): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): + request = tensorboard_service.DeleteTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_tensorboard_experiment(self, + request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (str): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_experiment(self, + request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment + resource. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): + request = tensorboard_service.GetTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_experiment(self, + request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tensorboard_experiments(self, + request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsPager: + r"""Lists TensorboardExperiments in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + parent (str): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardExperimentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardExperimentsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_experiment(self, + request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_run(self, + request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]): + The request object. 
Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): + request = tensorboard_service.CreateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_create_tensorboard_runs(self, + request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchCreateTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.BatchCreateTensorboardRunsRequest): + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_runs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_tensorboard_run(self, + request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. + name (str): + Required. The name of the TensorboardRun resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): + request = tensorboard_service.GetTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_run(self, + request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): + request = tensorboard_service.UpdateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run.name", request.tensorboard_run.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_tensorboard_runs(self, + request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsPager: + r"""Lists TensorboardRuns in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. + parent (str): + Required. The resource name of the + TensorboardExperiment to list + TensorboardRuns. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): + request = tensorboard_service.ListTensorboardRunsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardRunsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_tensorboard_run(self, + request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): + request = tensorboard_service.DeleteTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def batch_create_tensorboard_time_series(self, + request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: + r"""Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest() + requests.parent = "parent_value" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in + the CreateTensorboardTimeSeriesRequest messages must be + sub resources of this TensorboardExperiment. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]): + Required. The request message + specifying the TensorboardTimeSeries to + create. A maximum of 1000 + TensorboardTimeSeries can be created in + a batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse: + Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_time_series(self, + request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Creates a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]): + The request object. 
Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. + parent (str): + Required. The resource name of the TensorboardRun to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries + to create. + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest): + request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_time_series(self, + request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]): + The request object. 
Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. + name (str): + Required. The name of the TensorboardTimeSeries + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest): + request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_time_series(self, + request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_update_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.update_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): + Required. 
The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest): + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series.name", request.tensorboard_time_series.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tensorboard_time_series(self, + request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesPager: + r"""Lists TensorboardTimeSeries in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. + parent (str): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest): + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardTimeSeriesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_time_series(self, + request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardTimeSeries. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest): + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def batch_read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: + r"""Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_batch_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="tensorboard_value", + time_series=['time_series_value_1', 'time_series_value_2'], + ) + + # Make the request + response = client.batch_read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + tensorboard (str): + Required. The resource name of the Tensorboard + containing TensorboardTimeSeries to read data from. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. + The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. 
+ + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_read_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard", request.tensorboard), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to read data from. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest): + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_blob_data(self, + request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, + *, + time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: + r"""Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = client.read_tensorboard_blob_data(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + time_series (str): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardBlobDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("time_series", request.time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def write_tensorboard_experiment_data(self, + request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]): + The request object. 
Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment + to write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardExperimentDataRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.WriteTensorboardExperimentDataRequest): + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests is not None: + request.write_run_data_requests = write_run_data_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_experiment_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment", request.tensorboard_experiment), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def write_tensorboard_run_data(self, + request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (str): + Required. The resource name of the TensorboardRun to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]): + Required. The TensorboardTimeSeries + data to write. Values with in a time + series are indexed by their step value. + Repeated writes to the same step will + overwrite the existing value for that + step. + The upper limit of data points per write + request is 5000. + + This corresponds to the ``time_series_data`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse: + Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardRunDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data is not None: + request.time_series_data = time_series_data + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run", request.tensorboard_run), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def export_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py new file mode 100644 index 0000000000..6ef2a0b903 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_data +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import tensorboard_time_series + + +class ListTensorboardsPager: + """A pager for iterating through ``list_tensorboards`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardsResponse], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard.Tensorboard]: + for page in self.pages: + yield from page.tensorboards + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardsAsyncPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard.Tensorboard]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboards: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_experiment.TensorboardExperiment]: + for page in self.pages: + yield from page.tensorboard_experiments + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsAsyncPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_experiment.TensorboardExperiment]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_experiments: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_run.TensorboardRun]: + for page in self.pages: + yield from page.tensorboard_runs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsAsyncPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_run.TensorboardRun]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_runs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_time_series`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_time_series.TensorboardTimeSeries]: + for page in self.pages: + yield from page.tensorboard_time_series + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesAsyncPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_time_series`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_time_series.TensorboardTimeSeries]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_time_series: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__iter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_data.TimeSeriesDataPoint]: + for page in self.pages: + yield from page.time_series_data_points + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataAsyncPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_data.TimeSeriesDataPoint]: + async def async_generator(): + async for page in self.pages: + for response in page.time_series_data_points: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py new file mode 100644 index 0000000000..2c199b74f7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TensorboardServiceTransport +from .grpc import TensorboardServiceGrpcTransport +from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] +_transport_registry['grpc'] = TensorboardServiceGrpcTransport +_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport + +__all__ = ( + 'TensorboardServiceTransport', + 'TensorboardServiceGrpcTransport', + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py new file mode 100644 index 0000000000..53a95bae1f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py @@ -0,0 +1,638 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import tensorboard_time_series +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TensorboardServiceTransport(abc.ABC): + """Abstract transport class for TensorboardService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + 
credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_tensorboard: gapic_v1.method.wrap_method( + self.create_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard: gapic_v1.method.wrap_method( + self.get_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard: gapic_v1.method.wrap_method( + self.update_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboards: gapic_v1.method.wrap_method( + self.list_tensorboards, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard: gapic_v1.method.wrap_method( + self.delete_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_experiment: gapic_v1.method.wrap_method( + self.create_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_experiment: gapic_v1.method.wrap_method( + self.get_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_experiment: 
gapic_v1.method.wrap_method( + self.update_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_experiments: gapic_v1.method.wrap_method( + self.list_tensorboard_experiments, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( + self.delete_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_run: gapic_v1.method.wrap_method( + self.create_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_run: gapic_v1.method.wrap_method( + self.get_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_run: gapic_v1.method.wrap_method( + self.update_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_runs: gapic_v1.method.wrap_method( + self.list_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_run: gapic_v1.method.wrap_method( + self.delete_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_time_series: gapic_v1.method.wrap_method( + self.get_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_time_series: gapic_v1.method.wrap_method( + self.update_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + 
self.list_tensorboard_time_series: gapic_v1.method.wrap_method( + self.list_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( + self.delete_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( + self.read_tensorboard_blob_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method( + self.write_tensorboard_experiment_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_run_data: gapic_v1.method.wrap_method( + self.write_tensorboard_run_data, + default_timeout=None, + client_info=client_info, + ), + self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.export_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Union[ + tensorboard.Tensorboard, + Awaitable[tensorboard.Tensorboard] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Union[ + tensorboard_service.ListTensorboardsResponse, + Awaitable[tensorboard_service.ListTensorboardsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Union[ + tensorboard_experiment.TensorboardExperiment, + Awaitable[tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + 
Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Union[ + tensorboard_service.ListTensorboardExperimentsResponse, + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def batch_create_tensorboard_runs(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Union[ + tensorboard_service.BatchCreateTensorboardRunsResponse, + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Union[ + tensorboard_run.TensorboardRun, + Awaitable[tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Union[ + tensorboard_service.ListTensorboardRunsResponse, + Awaitable[tensorboard_service.ListTensorboardRunsResponse] + ]]: + raise NotImplementedError() + + 
    @property
    def delete_tensorboard_run(self) -> Callable[
            [tensorboard_service.DeleteTensorboardRunRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    # TensorboardTimeSeries CRUD.
    @property
    def batch_create_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest],
            Union[
                tensorboard_service.BatchCreateTensorboardTimeSeriesResponse,
                Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def create_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.CreateTensorboardTimeSeriesRequest],
            Union[
                gca_tensorboard_time_series.TensorboardTimeSeries,
                Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]
            ]]:
        raise NotImplementedError()

    @property
    def get_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.GetTensorboardTimeSeriesRequest],
            Union[
                tensorboard_time_series.TensorboardTimeSeries,
                Awaitable[tensorboard_time_series.TensorboardTimeSeries]
            ]]:
        raise NotImplementedError()

    @property
    def update_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.UpdateTensorboardTimeSeriesRequest],
            Union[
                gca_tensorboard_time_series.TensorboardTimeSeries,
                Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]
            ]]:
        raise NotImplementedError()

    @property
    def list_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.ListTensorboardTimeSeriesRequest],
            Union[
                tensorboard_service.ListTensorboardTimeSeriesResponse,
                Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.DeleteTensorboardTimeSeriesRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    # Time-series data read/write/export RPCs.
    @property
    def batch_read_tensorboard_time_series_data(self) -> Callable[
            [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest],
            Union[
                tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse,
                Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]
            ]]:
        raise NotImplementedError()

    @property
    def read_tensorboard_time_series_data(self) -> Callable[
            [tensorboard_service.ReadTensorboardTimeSeriesDataRequest],
            Union[
                tensorboard_service.ReadTensorboardTimeSeriesDataResponse,
                Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]
            ]]:
        raise NotImplementedError()

    @property
    def read_tensorboard_blob_data(self) -> Callable[
            [tensorboard_service.ReadTensorboardBlobDataRequest],
            Union[
                tensorboard_service.ReadTensorboardBlobDataResponse,
                Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]
            ]]:
        raise NotImplementedError()

    @property
    def write_tensorboard_experiment_data(self) -> Callable[
            [tensorboard_service.WriteTensorboardExperimentDataRequest],
            Union[
                tensorboard_service.WriteTensorboardExperimentDataResponse,
                Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse]
            ]]:
        raise NotImplementedError()

    @property
    def write_tensorboard_run_data(self) -> Callable[
            [tensorboard_service.WriteTensorboardRunDataRequest],
            Union[
                tensorboard_service.WriteTensorboardRunDataResponse,
                Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]
            ]]:
        raise NotImplementedError()

    @property
    def export_tensorboard_time_series_data(self) -> Callable[
            [tensorboard_service.ExportTensorboardTimeSeriesDataRequest],
            Union[
                tensorboard_service.ExportTensorboardTimeSeriesDataResponse,
                Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]
            ]]:
        raise NotImplementedError()

    # Standard google.longrunning Operations mixin surface.
    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    # Standard IAM policy mixin surface.
    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    # Standard Locations mixin surface.
    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short string identifying the concrete transport (e.g. "grpc").
        raise NotImplementedError()


# Explicit public API of this module.
__all__ = (
    'TensorboardServiceTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.aiplatform_v1.types import tensorboard
from google.cloud.aiplatform_v1.types import tensorboard_experiment
from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment
from google.cloud.aiplatform_v1.types import tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_service
from google.cloud.aiplatform_v1.types import tensorboard_time_series
from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# NOTE(review): the generated file imported ``operations_pb2`` twice; the
# duplicate line was removed.
from google.longrunning import operations_pb2  # type: ignore
from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO


class TensorboardServiceGrpcTransport(TensorboardServiceTransport):
    """gRPC backend transport for TensorboardService.

    TensorboardService

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of lazily-created unary-unary stubs, keyed by method name.
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'aiplatform.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
                       host: str = 'aiplatform.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    def _get_stub(
        self,
        name: str,
        rpc_name: str,
        request_serializer: Callable,
        response_deserializer: Callable,
    ) -> Callable:
        """Return the cached unary-unary stub for ``name``, creating it on first use.

        Generates a "stub function" on-the-fly which will actually make the
        request.  gRPC handles serialization and deserialization, so we just
        need to pass in the serializer/deserializer functions for each RPC.
        This helper factors out the identical registration boilerplate that
        every stub property below shares.
        """
        if name not in self._stubs:
            self._stubs[name] = self.grpc_channel.unary_unary(
                f'/google.cloud.aiplatform.v1.TensorboardService/{rpc_name}',
                request_serializer=request_serializer,
                response_deserializer=response_deserializer,
            )
        return self._stubs[name]

    @property
    def create_tensorboard(self) -> Callable[
            [tensorboard_service.CreateTensorboardRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the create tensorboard method over gRPC.

        Creates a Tensorboard; the callable returns a long-running Operation.
        """
        return self._get_stub(
            'create_tensorboard', 'CreateTensorboard',
            tensorboard_service.CreateTensorboardRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def get_tensorboard(self) -> Callable[
            [tensorboard_service.GetTensorboardRequest],
            tensorboard.Tensorboard]:
        r"""Return a callable for the get tensorboard method over gRPC.

        Gets a Tensorboard.
        """
        return self._get_stub(
            'get_tensorboard', 'GetTensorboard',
            tensorboard_service.GetTensorboardRequest.serialize,
            tensorboard.Tensorboard.deserialize,
        )

    @property
    def update_tensorboard(self) -> Callable[
            [tensorboard_service.UpdateTensorboardRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the update tensorboard method over gRPC.

        Updates a Tensorboard; the callable returns a long-running Operation.
        """
        return self._get_stub(
            'update_tensorboard', 'UpdateTensorboard',
            tensorboard_service.UpdateTensorboardRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def list_tensorboards(self) -> Callable[
            [tensorboard_service.ListTensorboardsRequest],
            tensorboard_service.ListTensorboardsResponse]:
        r"""Return a callable for the list tensorboards method over gRPC.

        Lists Tensorboards in a Location.
        """
        return self._get_stub(
            'list_tensorboards', 'ListTensorboards',
            tensorboard_service.ListTensorboardsRequest.serialize,
            tensorboard_service.ListTensorboardsResponse.deserialize,
        )

    @property
    def delete_tensorboard(self) -> Callable[
            [tensorboard_service.DeleteTensorboardRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete tensorboard method over gRPC.

        Deletes a Tensorboard; the callable returns a long-running Operation.
        """
        return self._get_stub(
            'delete_tensorboard', 'DeleteTensorboard',
            tensorboard_service.DeleteTensorboardRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def create_tensorboard_experiment(self) -> Callable[
            [tensorboard_service.CreateTensorboardExperimentRequest],
            gca_tensorboard_experiment.TensorboardExperiment]:
        r"""Return a callable for the create tensorboard experiment method over gRPC.

        Creates a TensorboardExperiment.
        """
        return self._get_stub(
            'create_tensorboard_experiment', 'CreateTensorboardExperiment',
            tensorboard_service.CreateTensorboardExperimentRequest.serialize,
            gca_tensorboard_experiment.TensorboardExperiment.deserialize,
        )

    @property
    def get_tensorboard_experiment(self) -> Callable[
            [tensorboard_service.GetTensorboardExperimentRequest],
            tensorboard_experiment.TensorboardExperiment]:
        r"""Return a callable for the get tensorboard experiment method over gRPC.

        Gets a TensorboardExperiment.
        """
        return self._get_stub(
            'get_tensorboard_experiment', 'GetTensorboardExperiment',
            tensorboard_service.GetTensorboardExperimentRequest.serialize,
            tensorboard_experiment.TensorboardExperiment.deserialize,
        )

    @property
    def update_tensorboard_experiment(self) -> Callable[
            [tensorboard_service.UpdateTensorboardExperimentRequest],
            gca_tensorboard_experiment.TensorboardExperiment]:
        r"""Return a callable for the update tensorboard experiment method over gRPC.

        Updates a TensorboardExperiment.
        """
        return self._get_stub(
            'update_tensorboard_experiment', 'UpdateTensorboardExperiment',
            tensorboard_service.UpdateTensorboardExperimentRequest.serialize,
            gca_tensorboard_experiment.TensorboardExperiment.deserialize,
        )

    @property
    def list_tensorboard_experiments(self) -> Callable[
            [tensorboard_service.ListTensorboardExperimentsRequest],
            tensorboard_service.ListTensorboardExperimentsResponse]:
        r"""Return a callable for the list tensorboard experiments method over gRPC.

        Lists TensorboardExperiments in a Location.
        """
        return self._get_stub(
            'list_tensorboard_experiments', 'ListTensorboardExperiments',
            tensorboard_service.ListTensorboardExperimentsRequest.serialize,
            tensorboard_service.ListTensorboardExperimentsResponse.deserialize,
        )

    @property
    def delete_tensorboard_experiment(self) -> Callable[
            [tensorboard_service.DeleteTensorboardExperimentRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete tensorboard experiment method over gRPC.

        Deletes a TensorboardExperiment; the callable returns a long-running
        Operation.
        """
        return self._get_stub(
            'delete_tensorboard_experiment', 'DeleteTensorboardExperiment',
            tensorboard_service.DeleteTensorboardExperimentRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def create_tensorboard_run(self) -> Callable[
            [tensorboard_service.CreateTensorboardRunRequest],
            gca_tensorboard_run.TensorboardRun]:
        r"""Return a callable for the create tensorboard run method over gRPC.

        Creates a TensorboardRun.
        """
        return self._get_stub(
            'create_tensorboard_run', 'CreateTensorboardRun',
            tensorboard_service.CreateTensorboardRunRequest.serialize,
            gca_tensorboard_run.TensorboardRun.deserialize,
        )

    @property
    def batch_create_tensorboard_runs(self) -> Callable[
            [tensorboard_service.BatchCreateTensorboardRunsRequest],
            tensorboard_service.BatchCreateTensorboardRunsResponse]:
        r"""Return a callable for the batch create tensorboard runs method over gRPC.

        Batch create TensorboardRuns.
        """
        return self._get_stub(
            'batch_create_tensorboard_runs', 'BatchCreateTensorboardRuns',
            tensorboard_service.BatchCreateTensorboardRunsRequest.serialize,
            tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize,
        )

    @property
    def get_tensorboard_run(self) -> Callable[
            [tensorboard_service.GetTensorboardRunRequest],
            tensorboard_run.TensorboardRun]:
        r"""Return a callable for the get tensorboard run method over gRPC.

        Gets a TensorboardRun.
        """
        return self._get_stub(
            'get_tensorboard_run', 'GetTensorboardRun',
            tensorboard_service.GetTensorboardRunRequest.serialize,
            tensorboard_run.TensorboardRun.deserialize,
        )

    @property
    def update_tensorboard_run(self) -> Callable[
            [tensorboard_service.UpdateTensorboardRunRequest],
            gca_tensorboard_run.TensorboardRun]:
        r"""Return a callable for the update tensorboard run method over gRPC.

        Updates a TensorboardRun.
        """
        return self._get_stub(
            'update_tensorboard_run', 'UpdateTensorboardRun',
            tensorboard_service.UpdateTensorboardRunRequest.serialize,
            gca_tensorboard_run.TensorboardRun.deserialize,
        )

    @property
    def list_tensorboard_runs(self) -> Callable[
            [tensorboard_service.ListTensorboardRunsRequest],
            tensorboard_service.ListTensorboardRunsResponse]:
        r"""Return a callable for the list tensorboard runs method over gRPC.

        Lists TensorboardRuns in a Location.
        """
        return self._get_stub(
            'list_tensorboard_runs', 'ListTensorboardRuns',
            tensorboard_service.ListTensorboardRunsRequest.serialize,
            tensorboard_service.ListTensorboardRunsResponse.deserialize,
        )

    @property
    def delete_tensorboard_run(self) -> Callable[
            [tensorboard_service.DeleteTensorboardRunRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete tensorboard run method over gRPC.

        Deletes a TensorboardRun; the callable returns a long-running Operation.
        """
        return self._get_stub(
            'delete_tensorboard_run', 'DeleteTensorboardRun',
            tensorboard_service.DeleteTensorboardRunRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def batch_create_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest],
            tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]:
        r"""Return a callable for the batch create tensorboard time series method over gRPC.

        Batch create TensorboardTimeSeries that belong to a
        TensorboardExperiment.
        """
        return self._get_stub(
            'batch_create_tensorboard_time_series', 'BatchCreateTensorboardTimeSeries',
            tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize,
            tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize,
        )

    @property
    def create_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.CreateTensorboardTimeSeriesRequest],
            gca_tensorboard_time_series.TensorboardTimeSeries]:
        r"""Return a callable for the create tensorboard time series method over gRPC.

        Creates a TensorboardTimeSeries.
        """
        return self._get_stub(
            'create_tensorboard_time_series', 'CreateTensorboardTimeSeries',
            tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize,
            gca_tensorboard_time_series.TensorboardTimeSeries.deserialize,
        )

    @property
    def get_tensorboard_time_series(self) -> Callable[
            [tensorboard_service.GetTensorboardTimeSeriesRequest],
            tensorboard_time_series.TensorboardTimeSeries]:
        r"""Return a callable for the get tensorboard time series method over gRPC.

        Gets a TensorboardTimeSeries.
        """
        # NOTE(review): the original registration code for this stub falls
        # outside the reviewed hunk; it is reconstructed from the uniform
        # generated pattern above — confirm against the full file.
        return self._get_stub(
            'get_tensorboard_time_series', 'GetTensorboardTimeSeries',
            tensorboard_service.GetTensorboardTimeSeriesRequest.serialize,
            tensorboard_time_series.TensorboardTimeSeries.deserialize,
        )
+ if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_time_series' not in self._stubs: + self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardTimeSeries', + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['update_tensorboard_time_series'] + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + tensorboard_service.ListTensorboardTimeSeriesResponse]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. 
+ + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + ~.ListTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_time_series' not in self._stubs: + self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardTimeSeries', + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['list_tensorboard_time_series'] + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. + + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_time_series' not in self._stubs: + self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardTimeSeries', + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_time_series'] + + @property + def batch_read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + ~.BatchReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_read_tensorboard_time_series_data' not in self._stubs: + self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/BatchReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['batch_read_tensorboard_time_series_data'] + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + tensorboard_service.ReadTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + ~.ReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_time_series_data' not in self._stubs: + self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_time_series_data'] + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + tensorboard_service.ReadTensorboardBlobDataResponse]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + ~.ReadTensorboardBlobDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_blob_data' not in self._stubs: + self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardBlobData', + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_blob_data'] + + @property + def write_tensorboard_experiment_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + tensorboard_service.WriteTensorboardExperimentDataResponse]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + ~.WriteTensorboardExperimentDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'write_tensorboard_experiment_data' not in self._stubs: + self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardExperimentData', + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_experiment_data'] + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + tensorboard_service.WriteTensorboardRunDataResponse]: + r"""Return a callable for the write tensorboard run data method over gRPC. 
+ + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + ~.WriteTensorboardRunDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'write_tensorboard_run_data' not in self._stubs: + self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardRunData', + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_run_data'] + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. + + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + ~.ExportTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'TensorboardServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..03281f83b9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py @@ -0,0 +1,1221 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import tensorboard_time_series +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import TensorboardServiceGrpcTransport + + +class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): + """gRPC AsyncIO backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard' not in self._stubs: + self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboard', + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_tensorboard'] + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Awaitable[tensorboard.Tensorboard]]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + Awaitable[~.Tensorboard]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard' not in self._stubs: + self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboard', + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs['get_tensorboard'] + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_tensorboard' not in self._stubs: + self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboard', + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_tensorboard'] + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Awaitable[tensorboard_service.ListTensorboardsResponse]]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + Awaitable[~.ListTensorboardsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboards' not in self._stubs: + self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboards', + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs['list_tensorboards'] + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard' not in self._stubs: + self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboard', + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard'] + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_experiment' not in self._stubs: + self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardExperiment', + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['create_tensorboard_experiment'] + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Awaitable[tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. + + Returns: + Callable[[~.GetTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_experiment' not in self._stubs: + self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardExperiment', + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['get_tensorboard_experiment'] + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_tensorboard_experiment' not in self._stubs: + self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardExperiment', + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['update_tensorboard_experiment'] + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + Awaitable[~.ListTensorboardExperimentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_experiments' not in self._stubs: + self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardExperiments', + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs['list_tensorboard_experiments'] + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. 
+ + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard_experiment' not in self._stubs: + self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardExperiment', + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_experiment'] + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard_run' not in self._stubs: + self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardRun', + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['create_tensorboard_run'] + + @property + def batch_create_tensorboard_runs(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse]]: + r"""Return a callable for the batch create tensorboard runs method over gRPC. + + Batch create TensorboardRuns. + + Returns: + Callable[[~.BatchCreateTensorboardRunsRequest], + Awaitable[~.BatchCreateTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_runs' not in self._stubs: + self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardRuns', + request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_runs'] + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Awaitable[tensorboard_run.TensorboardRun]]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_run' not in self._stubs: + self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardRun', + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['get_tensorboard_run'] + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_run' not in self._stubs: + self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardRun', + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['update_tensorboard_run'] + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. 
+ + Returns: + Callable[[~.ListTensorboardRunsRequest], + Awaitable[~.ListTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_runs' not in self._stubs: + self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardRuns', + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs['list_tensorboard_runs'] + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_run' not in self._stubs: + self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardRun', + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_run'] + + @property + def batch_create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]]: + r"""Return a callable for the batch create tensorboard time + series method over gRPC. + + Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Returns: + Callable[[~.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[~.BatchCreateTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_time_series' not in self._stubs: + self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardTimeSeries', + request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_time_series'] + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. 
+ + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_time_series' not in self._stubs: + self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardTimeSeries', + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['create_tensorboard_time_series'] + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_time_series' not in self._stubs: + self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardTimeSeries', + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['update_tensorboard_time_series'] + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. 
+ + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + Awaitable[~.ListTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_time_series' not in self._stubs: + self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardTimeSeries', + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['list_tensorboard_time_series'] + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. + + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_time_series' not in self._stubs: + self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardTimeSeries', + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_time_series'] + + @property + def batch_read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[~.BatchReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_read_tensorboard_time_series_data' not in self._stubs: + self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/BatchReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['batch_read_tensorboard_time_series_data'] + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_time_series_data' not in self._stubs: + self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_time_series_data'] + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + Awaitable[~.ReadTensorboardBlobDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_blob_data' not in self._stubs: + self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardBlobData', + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_blob_data'] + + @property + def write_tensorboard_experiment_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse]]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + Awaitable[~.WriteTensorboardExperimentDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'write_tensorboard_experiment_data' not in self._stubs: + self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardExperimentData', + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_experiment_data'] + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]: + r"""Return a callable for the write tensorboard run data method over gRPC. + + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + Awaitable[~.WriteTensorboardRunDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'write_tensorboard_run_data' not in self._stubs: + self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardRunData', + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_run_data'] + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. + + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. 
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py new file mode 100644 index 0000000000..5a652ada3e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import VizierServiceClient +from .async_client import VizierServiceAsyncClient + +__all__ = ( + 'VizierServiceClient', + 'VizierServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py new file mode 100644 index 0000000000..e21b181c98 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -0,0 +1,2264 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.vizier_service import pagers +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import study as gca_study +from google.cloud.aiplatform_v1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport +from .client import VizierServiceClient + + +class VizierServiceAsyncClient: + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. 
+ """ + + _client: VizierServiceClient + + DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT + + custom_job_path = staticmethod(VizierServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) + study_path = staticmethod(VizierServiceClient.study_path) + parse_study_path = staticmethod(VizierServiceClient.parse_study_path) + trial_path = staticmethod(VizierServiceClient.trial_path) + parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(VizierServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(VizierServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) + common_project_path = staticmethod(VizierServiceClient.common_project_path) + parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) + common_location_path = staticmethod(VizierServiceClient.common_location_path) + parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceAsyncClient: The constructed client. 
+        """
+        return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            VizierServiceAsyncClient: The constructed client.
+        """
+        return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client.
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return VizierServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> VizierServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VizierServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, VizierServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vizier service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.VizierServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = VizierServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_study(self, + request: Union[vizier_service.CreateStudyRequest, dict] = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]): + The request object. Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. + parent (:class:`str`): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + study (:class:`google.cloud.aiplatform_v1.types.Study`): + Required. The Study configuration + used to create the Study. + + This corresponds to the ``study`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, study]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.CreateStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if study is not None: + request.study = study + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_study, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_study(self, + request: Union[vizier_service.GetStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Gets a Study by name. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]): + The request object. Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. + name (:class:`str`): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.GetStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_study, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_studies(self, + request: Union[vizier_service.ListStudiesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListStudiesAsyncPager: + r"""Lists all the studies in a region for an associated + project. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_studies(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]): + The request object. Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesAsyncPager: + Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListStudiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_studies, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListStudiesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_study(self, + request: Union[vizier_service.DeleteStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + await client.delete_study(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. + name (:class:`str`): + Required. The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.DeleteStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_study, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def lookup_study(self, + request: Union[vizier_service.LookupStudyRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_lookup_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. + parent (:class:`str`): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.LookupStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.lookup_study, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def suggest_trials(self, + request: Union[vizier_service.SuggestTrialsRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. 
Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_suggest_trials(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]): + The request object. Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.SuggestTrialsResponse` + Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.SuggestTrialsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.suggest_trials, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vizier_service.SuggestTrialsResponse, + metadata_type=vizier_service.SuggestTrialsMetadata, + ) + + # Done; return the response. + return response + + async def create_trial(self, + request: Union[vizier_service.CreateTrialRequest, dict] = None, + *, + parent: str = None, + trial: study.Trial = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a user provided Trial to a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_create_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]): + The request object. Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. + parent (:class:`str`): + Required. The resource name of the Study to create the + Trial in. 
Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + trial (:class:`google.cloud.aiplatform_v1.types.Trial`): + Required. The Trial to create. + This corresponds to the ``trial`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, trial]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.CreateTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if trial is not None: + request.trial = trial + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_trial, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_trial(self, + request: Union[vizier_service.GetTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Gets a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_get_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]): + The request object. Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. + name (:class:`str`): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.GetTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_trial, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_trials(self, + request: Union[vizier_service.ListTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrialsAsyncPager: + r"""Lists the Trials associated with a Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_trials(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + parent (:class:`str`): + Required. The resource name of the Study to list the + Trial from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsAsyncPager: + Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListTrialsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_trials, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrialsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def add_trial_measurement(self, + request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = await client.add_trial_measurement(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]): + The request object. Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.AddTrialMeasurementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_trial_measurement, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def complete_trial(self, + request: Union[vizier_service.CompleteTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Marks a Trial as complete. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_complete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]): + The request object. Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.CompleteTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.complete_trial, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_trial(self, + request: Union[vizier_service.DeleteTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_delete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + await client.delete_trial(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]): + The request object. Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. + name (:class:`str`): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.DeleteTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_trial, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def check_trial_early_stopping_state(self, + request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_trial_early_stopping_state, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. + return response + + async def stop_trial(self, + request: Union[vizier_service.StopTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_stop_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. 
+ request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stop_trial, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_optimal_trials(self, + request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + async def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + parent (:class:`str`): + Required. The name of the Study that + the optimal Trial belongs to. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListOptimalTrialsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_optimal_trials, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VizierServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py new file mode 100644 index 0000000000..bd8e6d47b8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -0,0 +1,2481 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.vizier_service import pagers +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import study as gca_study +from google.cloud.aiplatform_v1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import VizierServiceGrpcTransport +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport + + +class VizierServiceClientMeta(type): + """Metaclass for the VizierService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] + _transport_registry["grpc"] = VizierServiceGrpcTransport + _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[VizierServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class VizierServiceClient(metaclass=VizierServiceClientMeta): + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VizierServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VizierServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def study_path(project: str,location: str,study: str,) -> str: + """Returns a fully-qualified study string.""" + return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) + + @staticmethod + def parse_study_path(path: str) -> Dict[str,str]: + """Parses a study path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def trial_path(project: str,location: str,study: str,trial: str,) -> str: + """Returns a fully-qualified trial string.""" + return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + + @staticmethod + def parse_trial_path(path: str) -> Dict[str,str]: + """Parses a trial path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component 
segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. 
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VizierServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vizier service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VizierServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, VizierServiceTransport): + # transport is a VizierServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_study(self, + request: Union[vizier_service.CreateStudyRequest, dict] = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]): + The request object. 
Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. + parent (str): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + study (google.cloud.aiplatform_v1.types.Study): + Required. The Study configuration + used to create the Study. + + This corresponds to the ``study`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, study]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateStudyRequest): + request = vizier_service.CreateStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if study is not None: + request.study = study + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_study(self, + request: Union[vizier_service.GetStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Gets a Study by name. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]): + The request object. Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. + name (str): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetStudyRequest): + request = vizier_service.GetStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_studies(self, + request: Union[vizier_service.ListStudiesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListStudiesPager: + r"""Lists all the studies in a region for an associated + project. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_studies(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]): + The request object. Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + parent (str): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesPager: + Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListStudiesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListStudiesRequest): + request = vizier_service.ListStudiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_studies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListStudiesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_study(self, + request: Union[vizier_service.DeleteStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + client.delete_study(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. + name (str): + Required. 
The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteStudyRequest): + request = vizier_service.DeleteStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def lookup_study(self, + request: Union[vizier_service.LookupStudyRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_lookup_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. + parent (str): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.LookupStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.LookupStudyRequest): + request = vizier_service.LookupStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.lookup_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def suggest_trials(self, + request: Union[vizier_service.SuggestTrialsRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_suggest_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]): + The request object. Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.SuggestTrialsResponse` + Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.SuggestTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.SuggestTrialsRequest): + request = vizier_service.SuggestTrialsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.suggest_trials] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.SuggestTrialsResponse, + metadata_type=vizier_service.SuggestTrialsMetadata, + ) + + # Done; return the response. + return response + + def create_trial(self, + request: Union[vizier_service.CreateTrialRequest, dict] = None, + *, + parent: str = None, + trial: study.Trial = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a user provided Trial to a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_create_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]): + The request object. Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. + parent (str): + Required. The resource name of the Study to create the + Trial in. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + trial (google.cloud.aiplatform_v1.types.Trial): + Required. The Trial to create. + This corresponds to the ``trial`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, trial]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateTrialRequest): + request = vizier_service.CreateTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if trial is not None: + request.trial = trial + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_trial(self, + request: Union[vizier_service.GetTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Gets a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_get_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]): + The request object. Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. + name (str): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetTrialRequest): + request = vizier_service.GetTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_trials(self, + request: Union[vizier_service.ListTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrialsPager: + r"""Lists the Trials associated with a Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + parent (str): + Required. The resource name of the Study to list the + Trial from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsPager: + Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListTrialsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListTrialsRequest): + request = vizier_service.ListTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTrialsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_trial_measurement(self, + request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]): + The request object. Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.AddTrialMeasurementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.AddTrialMeasurementRequest): + request = vizier_service.AddTrialMeasurementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def complete_trial(self, + request: Union[vizier_service.CompleteTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Marks a Trial as complete. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_complete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]): + The request object. Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CompleteTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, vizier_service.CompleteTrialRequest): + request = vizier_service.CompleteTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.complete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_trial(self, + request: Union[vizier_service.DeleteTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_delete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + client.delete_trial(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]): + The request object. Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteTrialRequest): + request = vizier_service.DeleteTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def check_trial_early_stopping_state(self, + request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CheckTrialEarlyStoppingStateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest): + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. + return response + + def stop_trial(self, + request: Union[vizier_service.StopTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_stop_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Trial: + A message representing a Trial. 
A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.StopTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.StopTrialRequest): + request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stop_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_optimal_trials(self, + request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + .. 
code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + parent (str): + Required. The name of the Study that + the optimal Trial belongs to. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListOptimalTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, vizier_service.ListOptimalTrialsRequest): + request = vizier_service.ListOptimalTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a resource. + + If the resource does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Iterator, Sequence, Tuple

from google.cloud.aiplatform_v1.types import study
from google.cloud.aiplatform_v1.types import vizier_service


class ListStudiesPager:
    """Synchronous pager over ``list_studies`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` and lets
    callers iterate over every entry of the ``studies`` field; additional
    ``ListStudies`` pages are fetched transparently whenever the current
    response carries a ``next_page_token``.

    All the usual response attributes are available on the pager itself.
    If multiple requests are made, only the most recent response is
    retained and used for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., vizier_service.ListStudiesResponse],
            request: vizier_service.ListStudiesRequest,
            response: vizier_service.ListStudiesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListStudiesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListStudiesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = vizier_service.ListStudiesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[vizier_service.ListStudiesResponse]:
        """Yield each page of results, fetching the next page lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[study.Study]:
        return (item for page in self.pages for item in page.studies)

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"


class ListStudiesAsyncPager:
    """Asynchronous pager over ``list_studies`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` and
    provides ``__aiter__`` over every entry of the ``studies`` field;
    additional ``ListStudies`` pages are awaited transparently whenever the
    current response carries a ``next_page_token``.

    All the usual response attributes are available on the pager itself.
    If multiple requests are made, only the most recent response is
    retained and used for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]],
            request: vizier_service.ListStudiesRequest,
            response: vizier_service.ListStudiesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListStudiesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListStudiesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = vizier_service.ListStudiesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]:
        """Yield each page of results, awaiting the next page lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[study.Study]:
        async def _flatten():
            async for page in self.pages:
                for item in page.studies:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"


class ListTrialsPager:
    """Synchronous pager over ``list_trials`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` and lets
    callers iterate over every entry of the ``trials`` field; additional
    ``ListTrials`` pages are fetched transparently whenever the current
    response carries a ``next_page_token``.

    All the usual response attributes are available on the pager itself.
    If multiple requests are made, only the most recent response is
    retained and used for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., vizier_service.ListTrialsResponse],
            request: vizier_service.ListTrialsRequest,
            response: vizier_service.ListTrialsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListTrialsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListTrialsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = vizier_service.ListTrialsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[vizier_service.ListTrialsResponse]:
        """Yield each page of results, fetching the next page lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[study.Trial]:
        return (item for page in self.pages for item in page.trials)

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"


class ListTrialsAsyncPager:
    """Asynchronous pager over ``list_trials`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` and
    provides ``__aiter__`` over every entry of the ``trials`` field;
    additional ``ListTrials`` pages are awaited transparently whenever the
    current response carries a ``next_page_token``.

    All the usual response attributes are available on the pager itself.
    If multiple requests are made, only the most recent response is
    retained and used for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]],
            request: vizier_service.ListTrialsRequest,
            response: vizier_service.ListTrialsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused to fetch subsequent pages.
            request (google.cloud.aiplatform_v1.types.ListTrialsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListTrialsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = vizier_service.ListTrialsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response proto.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]:
        """Yield each page of results, awaiting the next page lazily."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[study.Trial]:
        async def _flatten():
            async for page in self.pages:
                for item in page.trials:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Transport registry for the VizierService clients."""
from collections import OrderedDict
from typing import Dict, Type

from .base import VizierServiceTransport
from .grpc import VizierServiceGrpcTransport
from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport


# Registry mapping each transport name to the class implementing it;
# insertion order is preserved so 'grpc' remains the default choice.
_transport_registry = OrderedDict(
    [
        ('grpc', VizierServiceGrpcTransport),
        ('grpc_asyncio', VizierServiceGrpcAsyncIOTransport),
    ]
)  # type: Dict[str, Type[VizierServiceTransport]]

__all__ = (
    'VizierServiceTransport',
    'VizierServiceGrpcTransport',
    'VizierServiceGrpcAsyncIOTransport',
)
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import study
from google.cloud.aiplatform_v1.types import study as gca_study
from google.cloud.aiplatform_v1.types import vizier_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# FIX: the generated file imported operations_pb2 twice; keep a single import.
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. a source checkout): fall back to an
    # unversioned user-agent string.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class VizierServiceTransport(abc.ABC):
    """Abstract transport class for VizierService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties
    declared below; this base class handles credential resolution and
    per-method retry/timeout wrapping shared by all transports.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            # FIX: PEP 484 — a None default requires an explicit Optional.
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute retry/timeout wrappers for every RPC method.

        Called by concrete transports once their channel exists; the wrapped
        callables carry default timeouts and the user-agent from
        ``client_info``.
        """
        self._wrapped_methods = {
            self.create_study: gapic_v1.method.wrap_method(
                self.create_study,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_study: gapic_v1.method.wrap_method(
                self.get_study,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_studies: gapic_v1.method.wrap_method(
                self.list_studies,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_study: gapic_v1.method.wrap_method(
                self.delete_study,
                default_timeout=None,
                client_info=client_info,
            ),
            self.lookup_study: gapic_v1.method.wrap_method(
                self.lookup_study,
                default_timeout=None,
                client_info=client_info,
            ),
            self.suggest_trials: gapic_v1.method.wrap_method(
                self.suggest_trials,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_trial: gapic_v1.method.wrap_method(
                self.create_trial,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_trial: gapic_v1.method.wrap_method(
                self.get_trial,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_trials: gapic_v1.method.wrap_method(
                self.list_trials,
                default_timeout=None,
                client_info=client_info,
            ),
            self.add_trial_measurement: gapic_v1.method.wrap_method(
                self.add_trial_measurement,
                default_timeout=None,
                client_info=client_info,
            ),
            self.complete_trial: gapic_v1.method.wrap_method(
                self.complete_trial,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_trial: gapic_v1.method.wrap_method(
                self.delete_trial,
                default_timeout=None,
                client_info=client_info,
            ),
            self.check_trial_early_stopping_state: gapic_v1.method.wrap_method(
                self.check_trial_early_stopping_state,
                default_timeout=None,
                client_info=client_info,
            ),
            self.stop_trial: gapic_v1.method.wrap_method(
                self.stop_trial,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_optimal_trials: gapic_v1.method.wrap_method(
                self.list_optimal_trials,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # -- Abstract RPC surface ------------------------------------------------
    # Each property returns the transport-specific callable for one RPC; the
    # Union covers both sync (gRPC) and async (gRPC-asyncio) implementations.

    @property
    def create_study(self) -> Callable[
            [vizier_service.CreateStudyRequest],
            Union[
                gca_study.Study,
                Awaitable[gca_study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def get_study(self) -> Callable[
            [vizier_service.GetStudyRequest],
            Union[
                study.Study,
                Awaitable[study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def list_studies(self) -> Callable[
            [vizier_service.ListStudiesRequest],
            Union[
                vizier_service.ListStudiesResponse,
                Awaitable[vizier_service.ListStudiesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_study(self) -> Callable[
            [vizier_service.DeleteStudyRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def lookup_study(self) -> Callable[
            [vizier_service.LookupStudyRequest],
            Union[
                study.Study,
                Awaitable[study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def suggest_trials(self) -> Callable[
            [vizier_service.SuggestTrialsRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def create_trial(self) -> Callable[
            [vizier_service.CreateTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def get_trial(self) -> Callable[
            [vizier_service.GetTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def list_trials(self) -> Callable[
            [vizier_service.ListTrialsRequest],
            Union[
                vizier_service.ListTrialsResponse,
                Awaitable[vizier_service.ListTrialsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def add_trial_measurement(self) -> Callable[
            [vizier_service.AddTrialMeasurementRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def complete_trial(self) -> Callable[
            [vizier_service.CompleteTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def delete_trial(self) -> Callable[
            [vizier_service.DeleteTrialRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def check_trial_early_stopping_state(self) -> Callable[
            [vizier_service.CheckTrialEarlyStoppingStateRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def stop_trial(self) -> Callable[
            [vizier_service.StopTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def list_optimal_trials(self) -> Callable[
            [vizier_service.ListOptimalTrialsRequest],
            Union[
                vizier_service.ListOptimalTrialsResponse,
                Awaitable[vizier_service.ListOptimalTrialsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'VizierServiceTransport',
)
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py new file mode 100644 index 0000000000..763ecb69ab --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -0,0 +1,876 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import study as gca_study +from google.cloud.aiplatform_v1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO + + +class VizierServiceGrpcTransport(VizierServiceTransport): + """gRPC backend transport for VizierService. 
+ + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. 
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_study(self) -> Callable[ + [vizier_service.CreateStudyRequest], + gca_study.Study]: + r"""Return a callable for the create study method over gRPC. + + Creates a Study. A resource name will be generated + after creation of the Study. + + Returns: + Callable[[~.CreateStudyRequest], + ~.Study]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_study' not in self._stubs: + self._stubs['create_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CreateStudy', + request_serializer=vizier_service.CreateStudyRequest.serialize, + response_deserializer=gca_study.Study.deserialize, + ) + return self._stubs['create_study'] + + @property + def get_study(self) -> Callable[ + [vizier_service.GetStudyRequest], + study.Study]: + r"""Return a callable for the get study method over gRPC. 
+ + Gets a Study by name. + + Returns: + Callable[[~.GetStudyRequest], + ~.Study]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_study' not in self._stubs: + self._stubs['get_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/GetStudy', + request_serializer=vizier_service.GetStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['get_study'] + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + vizier_service.ListStudiesResponse]: + r"""Return a callable for the list studies method over gRPC. + + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + ~.ListStudiesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + study.Study]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Returns: + Callable[[~.LookupStudyRequest], + ~.Study]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'lookup_study' not in self._stubs: + self._stubs['lookup_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/LookupStudy', + request_serializer=vizier_service.LookupStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['lookup_study'] + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + operations_pb2.Operation]: + r"""Return a callable for the suggest trials method over gRPC. + + Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
+ + Returns: + Callable[[~.SuggestTrialsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'suggest_trials' not in self._stubs: + self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/SuggestTrials', + request_serializer=vizier_service.SuggestTrialsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['suggest_trials'] + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + study.Trial]: + r"""Return a callable for the create trial method over gRPC. + + Adds a user provided Trial to a Study. + + Returns: + Callable[[~.CreateTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + study.Trial]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + vizier_service.ListTrialsResponse]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + ~.ListTrialsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + study.Trial]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + study.Trial]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_trial' not in self._stubs: + self._stubs['complete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CompleteTrial', + request_serializer=vizier_service.CompleteTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['complete_trial'] + + @property + def delete_trial(self) -> Callable[ + [vizier_service.DeleteTrialRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete trial method over gRPC. + + Deletes a Trial. + + Returns: + Callable[[~.DeleteTrialRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_trial' not in self._stubs: + self._stubs['delete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/DeleteTrial', + request_serializer=vizier_service.DeleteTrialRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_trial'] + + @property + def check_trial_early_stopping_state(self) -> Callable[ + [vizier_service.CheckTrialEarlyStoppingStateRequest], + operations_pb2.Operation]: + r"""Return a callable for the check trial early stopping + state method over gRPC. + + Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + Returns: + Callable[[~.CheckTrialEarlyStoppingStateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_trial_early_stopping_state' not in self._stubs: + self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CheckTrialEarlyStoppingState', + request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['check_trial_early_stopping_state'] + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + study.Trial]: + r"""Return a callable for the stop trial method over gRPC. + + Stops a Trial. + + Returns: + Callable[[~.StopTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'stop_trial' not in self._stubs: + self._stubs['stop_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/StopTrial', + request_serializer=vizier_service.StopTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['stop_trial'] + + @property + def list_optimal_trials(self) -> Callable[ + [vizier_service.ListOptimalTrialsRequest], + vizier_service.ListOptimalTrialsResponse]: + r"""Return a callable for the list optimal trials method over gRPC. + + Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + Returns: + Callable[[~.ListOptimalTrialsRequest], + ~.ListOptimalTrialsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_optimal_trials' not in self._stubs: + self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListOptimalTrials', + request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, + response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, + ) + return self._stubs['list_optimal_trials'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(review): fixed copy-paste bug — the guard previously checked
+        # "delete_operation", so the wait stub was recreated on every access.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'VizierServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a084125b77 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py @@ -0,0 +1,875 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import study as gca_study +from google.cloud.aiplatform_v1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import VizierServiceGrpcTransport + + +class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): + """gRPC AsyncIO backend transport for VizierService. + + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_study(self) -> Callable[ + [vizier_service.CreateStudyRequest], + Awaitable[gca_study.Study]]: + r"""Return a callable for the create study method over gRPC. + + Creates a Study. A resource name will be generated + after creation of the Study. + + Returns: + Callable[[~.CreateStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_study' not in self._stubs: + self._stubs['create_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CreateStudy', + request_serializer=vizier_service.CreateStudyRequest.serialize, + response_deserializer=gca_study.Study.deserialize, + ) + return self._stubs['create_study'] + + @property + def get_study(self) -> Callable[ + [vizier_service.GetStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the get study method over gRPC. + + Gets a Study by name. + + Returns: + Callable[[~.GetStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_study' not in self._stubs: + self._stubs['get_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/GetStudy', + request_serializer=vizier_service.GetStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['get_study'] + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + Awaitable[vizier_service.ListStudiesResponse]]: + r"""Return a callable for the list studies method over gRPC. + + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + Awaitable[~.ListStudiesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Returns: + Callable[[~.LookupStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'lookup_study' not in self._stubs:
+            self._stubs['lookup_study'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.VizierService/LookupStudy',
+                request_serializer=vizier_service.LookupStudyRequest.serialize,
+                response_deserializer=study.Study.deserialize,
+            )
+        return self._stubs['lookup_study']
+
+    @property
+    def suggest_trials(self) -> Callable[
+            [vizier_service.SuggestTrialsRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the suggest trials method over gRPC.
+
+        Adds one or more Trials to a Study, with parameter values
+        suggested by Vertex AI Vizier. Returns a long-running operation
+        associated with the generation of Trial suggestions. When this
+        long-running operation succeeds, it will contain a
+        [SuggestTrialsResponse][google.cloud.aiplatform.v1.SuggestTrialsResponse].
+
+        Returns:
+            Callable[[~.SuggestTrialsRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'suggest_trials' not in self._stubs:
+            self._stubs['suggest_trials'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.VizierService/SuggestTrials',
+                request_serializer=vizier_service.SuggestTrialsRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['suggest_trials']
+
+    @property
+    def create_trial(self) -> Callable[
+            [vizier_service.CreateTrialRequest],
+            Awaitable[study.Trial]]:
+        r"""Return a callable for the create trial method over gRPC.
+
+        Adds a user provided Trial to a Study.
+
+        Returns:
+            Callable[[~.CreateTrialRequest],
+                    Awaitable[~.Trial]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + Awaitable[vizier_service.ListTrialsResponse]]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + Awaitable[~.ListTrialsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'complete_trial' not in self._stubs:
+            self._stubs['complete_trial'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.VizierService/CompleteTrial',
+                request_serializer=vizier_service.CompleteTrialRequest.serialize,
+                response_deserializer=study.Trial.deserialize,
+            )
+        return self._stubs['complete_trial']
+
+    @property
+    def delete_trial(self) -> Callable[
+            [vizier_service.DeleteTrialRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the delete trial method over gRPC.
+
+        Deletes a Trial.
+
+        Returns:
+            Callable[[~.DeleteTrialRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_trial' not in self._stubs:
+            self._stubs['delete_trial'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.VizierService/DeleteTrial',
+                request_serializer=vizier_service.DeleteTrialRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['delete_trial']
+
+    @property
+    def check_trial_early_stopping_state(self) -> Callable[
+            [vizier_service.CheckTrialEarlyStoppingStateRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the check trial early stopping
+        state method over gRPC.
+
+        Checks whether a Trial should stop or not. Returns a
+        long-running operation. When the operation is successful, it
+        will contain a
+        [CheckTrialEarlyStoppingStateResponse][google.cloud.aiplatform.v1.CheckTrialEarlyStoppingStateResponse].
+
+        Returns:
+            Callable[[~.CheckTrialEarlyStoppingStateRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_trial_early_stopping_state' not in self._stubs: + self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/CheckTrialEarlyStoppingState', + request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['check_trial_early_stopping_state'] + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the stop trial method over gRPC. + + Stops a Trial. + + Returns: + Callable[[~.StopTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'stop_trial' not in self._stubs: + self._stubs['stop_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/StopTrial', + request_serializer=vizier_service.StopTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['stop_trial'] + + @property + def list_optimal_trials(self) -> Callable[ + [vizier_service.ListOptimalTrialsRequest], + Awaitable[vizier_service.ListOptimalTrialsResponse]]: + r"""Return a callable for the list optimal trials method over gRPC. + + Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. 
+ https://en.wikipedia.org/wiki/Pareto_efficiency + + Returns: + Callable[[~.ListOptimalTrialsRequest], + Awaitable[~.ListOptimalTrialsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_optimal_trials' not in self._stubs: + self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.VizierService/ListOptimalTrials', + request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, + response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, + ) + return self._stubs['list_optimal_trials'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'VizierServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py new file mode 100644 index 0000000000..a86559b5d4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py @@ -0,0 +1,1026 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) +from .custom_job import ( + ContainerSpec, + CustomJob, + CustomJobSpec, + PythonPackageSpec, + Scheduling, + WorkerPoolSpec, +) +from .data_item import ( + DataItem, +) +from .data_labeling_job import ( + ActiveLearningConfig, + DataLabelingJob, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ExportDataConfig, + ImportDataConfig, +) +from .dataset_service import ( + CreateDatasetOperationMetadata, + CreateDatasetRequest, + DeleteDatasetRequest, + ExportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + GetAnnotationSpecRequest, + GetDatasetRequest, + ImportDataOperationMetadata, + ImportDataRequest, + ImportDataResponse, + ListAnnotationsRequest, + ListAnnotationsResponse, + ListDataItemsRequest, + ListDataItemsResponse, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, +) +from .deployed_index_ref import ( + DeployedIndexRef, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) +from .endpoint import ( + DeployedModel, + Endpoint, + PredictRequestResponseLoggingConfig, + PrivateEndpoints, +) +from .endpoint_service import ( + CreateEndpointOperationMetadata, + CreateEndpointRequest, + DeleteEndpointRequest, + DeployModelOperationMetadata, + DeployModelRequest, + DeployModelResponse, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UndeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UpdateEndpointRequest, +) +from .entity_type import ( + EntityType, +) +from .env_var import ( + EnvVar, +) +from .event import ( + Event, +) +from .execution import ( + Execution, +) +from .explanation import ( + 
Attribution, + BlurBaselineConfig, + ExamplesOverride, + ExamplesRestrictionsNamespace, + Explanation, + ExplanationMetadataOverride, + ExplanationParameters, + ExplanationSpec, + ExplanationSpecOverride, + FeatureNoiseSigma, + IntegratedGradientsAttribution, + ModelExplanation, + Neighbor, + SampledShapleyAttribution, + SmoothGradConfig, + XraiAttribution, +) +from .explanation_metadata import ( + ExplanationMetadata, +) +from .feature import ( + Feature, +) +from .feature_monitoring_stats import ( + FeatureStatsAnomaly, +) +from .feature_selector import ( + FeatureSelector, + IdMatcher, +) +from .featurestore import ( + Featurestore, +) +from .featurestore_monitoring import ( + FeaturestoreMonitoringConfig, +) +from .featurestore_online_service import ( + FeatureValue, + FeatureValueList, + ReadFeatureValuesRequest, + ReadFeatureValuesResponse, + StreamingReadFeatureValuesRequest, +) +from .featurestore_service import ( + BatchCreateFeaturesOperationMetadata, + BatchCreateFeaturesRequest, + BatchCreateFeaturesResponse, + BatchReadFeatureValuesOperationMetadata, + BatchReadFeatureValuesRequest, + BatchReadFeatureValuesResponse, + CreateEntityTypeOperationMetadata, + CreateEntityTypeRequest, + CreateFeatureOperationMetadata, + CreateFeatureRequest, + CreateFeaturestoreOperationMetadata, + CreateFeaturestoreRequest, + DeleteEntityTypeRequest, + DeleteFeatureRequest, + DeleteFeaturestoreRequest, + DestinationFeatureSetting, + ExportFeatureValuesOperationMetadata, + ExportFeatureValuesRequest, + ExportFeatureValuesResponse, + FeatureValueDestination, + GetEntityTypeRequest, + GetFeatureRequest, + GetFeaturestoreRequest, + ImportFeatureValuesOperationMetadata, + ImportFeatureValuesRequest, + ImportFeatureValuesResponse, + ListEntityTypesRequest, + ListEntityTypesResponse, + ListFeaturesRequest, + ListFeaturesResponse, + ListFeaturestoresRequest, + ListFeaturestoresResponse, + SearchFeaturesRequest, + SearchFeaturesResponse, + UpdateEntityTypeRequest, + 
UpdateFeatureRequest, + UpdateFeaturestoreOperationMetadata, + UpdateFeaturestoreRequest, +) +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .index import ( + Index, +) +from .index_endpoint import ( + DeployedIndex, + DeployedIndexAuthConfig, + IndexEndpoint, + IndexPrivateEndpoints, +) +from .index_endpoint_service import ( + CreateIndexEndpointOperationMetadata, + CreateIndexEndpointRequest, + DeleteIndexEndpointRequest, + DeployIndexOperationMetadata, + DeployIndexRequest, + DeployIndexResponse, + GetIndexEndpointRequest, + ListIndexEndpointsRequest, + ListIndexEndpointsResponse, + MutateDeployedIndexOperationMetadata, + MutateDeployedIndexRequest, + MutateDeployedIndexResponse, + UndeployIndexOperationMetadata, + UndeployIndexRequest, + UndeployIndexResponse, + UpdateIndexEndpointRequest, +) +from .index_service import ( + CreateIndexOperationMetadata, + CreateIndexRequest, + DeleteIndexRequest, + GetIndexRequest, + ListIndexesRequest, + ListIndexesResponse, + NearestNeighborSearchOperationMetadata, + UpdateIndexOperationMetadata, + UpdateIndexRequest, +) +from .io import ( + AvroSource, + BigQueryDestination, + BigQuerySource, + ContainerRegistryDestination, + CsvDestination, + CsvSource, + GcsDestination, + GcsSource, + TFRecordDestination, +) +from .job_service import ( + CancelBatchPredictionJobRequest, + CancelCustomJobRequest, + CancelDataLabelingJobRequest, + CancelHyperparameterTuningJobRequest, + CreateBatchPredictionJobRequest, + CreateCustomJobRequest, + CreateDataLabelingJobRequest, + CreateHyperparameterTuningJobRequest, + CreateModelDeploymentMonitoringJobRequest, + DeleteBatchPredictionJobRequest, + DeleteCustomJobRequest, + DeleteDataLabelingJobRequest, + DeleteHyperparameterTuningJobRequest, + DeleteModelDeploymentMonitoringJobRequest, + GetBatchPredictionJobRequest, + GetCustomJobRequest, + GetDataLabelingJobRequest, + GetHyperparameterTuningJobRequest, + GetModelDeploymentMonitoringJobRequest, + 
ListBatchPredictionJobsRequest, + ListBatchPredictionJobsResponse, + ListCustomJobsRequest, + ListCustomJobsResponse, + ListDataLabelingJobsRequest, + ListDataLabelingJobsResponse, + ListHyperparameterTuningJobsRequest, + ListHyperparameterTuningJobsResponse, + ListModelDeploymentMonitoringJobsRequest, + ListModelDeploymentMonitoringJobsResponse, + PauseModelDeploymentMonitoringJobRequest, + ResumeModelDeploymentMonitoringJobRequest, + SearchModelDeploymentMonitoringStatsAnomaliesRequest, + SearchModelDeploymentMonitoringStatsAnomaliesResponse, + UpdateModelDeploymentMonitoringJobOperationMetadata, + UpdateModelDeploymentMonitoringJobRequest, +) +from .lineage_subgraph import ( + LineageSubgraph, +) +from .machine_resources import ( + AutomaticResources, + AutoscalingMetricSpec, + BatchDedicatedResources, + DedicatedResources, + DiskSpec, + MachineSpec, + NfsMount, + ResourcesConsumed, +) +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .metadata_schema import ( + MetadataSchema, +) +from .metadata_service import ( + AddContextArtifactsAndExecutionsRequest, + AddContextArtifactsAndExecutionsResponse, + AddContextChildrenRequest, + AddContextChildrenResponse, + AddExecutionEventsRequest, + AddExecutionEventsResponse, + CreateArtifactRequest, + CreateContextRequest, + CreateExecutionRequest, + CreateMetadataSchemaRequest, + CreateMetadataStoreOperationMetadata, + CreateMetadataStoreRequest, + DeleteArtifactRequest, + DeleteContextRequest, + DeleteExecutionRequest, + DeleteMetadataStoreOperationMetadata, + DeleteMetadataStoreRequest, + GetArtifactRequest, + GetContextRequest, + GetExecutionRequest, + GetMetadataSchemaRequest, + GetMetadataStoreRequest, + ListArtifactsRequest, + ListArtifactsResponse, + ListContextsRequest, + ListContextsResponse, + ListExecutionsRequest, + ListExecutionsResponse, + ListMetadataSchemasRequest, + ListMetadataSchemasResponse, + ListMetadataStoresRequest, + ListMetadataStoresResponse, + 
PurgeArtifactsMetadata, + PurgeArtifactsRequest, + PurgeArtifactsResponse, + PurgeContextsMetadata, + PurgeContextsRequest, + PurgeContextsResponse, + PurgeExecutionsMetadata, + PurgeExecutionsRequest, + PurgeExecutionsResponse, + QueryArtifactLineageSubgraphRequest, + QueryContextLineageSubgraphRequest, + QueryExecutionInputsAndOutputsRequest, + UpdateArtifactRequest, + UpdateContextRequest, + UpdateExecutionRequest, +) +from .metadata_store import ( + MetadataStore, +) +from .migratable_resource import ( + MigratableResource, +) +from .migration_service import ( + BatchMigrateResourcesOperationMetadata, + BatchMigrateResourcesRequest, + BatchMigrateResourcesResponse, + MigrateResourceRequest, + MigrateResourceResponse, + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, +) +from .model import ( + Model, + ModelContainerSpec, + Port, + PredictSchemata, +) +from .model_deployment_monitoring_job import ( + ModelDeploymentMonitoringBigQueryTable, + ModelDeploymentMonitoringJob, + ModelDeploymentMonitoringObjectiveConfig, + ModelDeploymentMonitoringScheduleConfig, + ModelMonitoringStatsAnomalies, + ModelDeploymentMonitoringObjectiveType, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + ModelEvaluationSlice, +) +from .model_monitoring import ( + ModelMonitoringAlertConfig, + ModelMonitoringObjectiveConfig, + SamplingStrategy, + ThresholdConfig, +) +from .model_service import ( + DeleteModelRequest, + DeleteModelVersionRequest, + ExportModelOperationMetadata, + ExportModelRequest, + ExportModelResponse, + GetModelEvaluationRequest, + GetModelEvaluationSliceRequest, + GetModelRequest, + ImportModelEvaluationRequest, + ListModelEvaluationSlicesRequest, + ListModelEvaluationSlicesResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + ListModelVersionsRequest, + ListModelVersionsResponse, + MergeVersionAliasesRequest, + UpdateModelRequest, + 
UploadModelOperationMetadata, + UploadModelRequest, + UploadModelResponse, +) +from .operation import ( + DeleteOperationMetadata, + GenericOperationMetadata, +) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, + PipelineTemplateMetadata, +) +from .pipeline_service import ( + CancelPipelineJobRequest, + CancelTrainingPipelineRequest, + CreatePipelineJobRequest, + CreateTrainingPipelineRequest, + DeletePipelineJobRequest, + DeleteTrainingPipelineRequest, + GetPipelineJobRequest, + GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, +) +from .prediction_service import ( + ExplainRequest, + ExplainResponse, + PredictRequest, + PredictResponse, + RawPredictRequest, +) +from .specialist_pool import ( + SpecialistPool, +) +from .specialist_pool_service import ( + CreateSpecialistPoolOperationMetadata, + CreateSpecialistPoolRequest, + DeleteSpecialistPoolRequest, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + UpdateSpecialistPoolOperationMetadata, + UpdateSpecialistPoolRequest, +) +from .study import ( + Measurement, + Study, + StudySpec, + Trial, +) +from .tensorboard import ( + Tensorboard, +) +from .tensorboard_data import ( + Scalar, + TensorboardBlob, + TensorboardBlobSequence, + TensorboardTensor, + TimeSeriesData, + TimeSeriesDataPoint, +) +from .tensorboard_experiment import ( + TensorboardExperiment, +) +from .tensorboard_run import ( + TensorboardRun, +) +from .tensorboard_service import ( + BatchCreateTensorboardRunsRequest, + BatchCreateTensorboardRunsResponse, + BatchCreateTensorboardTimeSeriesRequest, + BatchCreateTensorboardTimeSeriesResponse, + BatchReadTensorboardTimeSeriesDataRequest, + BatchReadTensorboardTimeSeriesDataResponse, + CreateTensorboardExperimentRequest, + CreateTensorboardOperationMetadata, + CreateTensorboardRequest, + 
CreateTensorboardRunRequest, + CreateTensorboardTimeSeriesRequest, + DeleteTensorboardExperimentRequest, + DeleteTensorboardRequest, + DeleteTensorboardRunRequest, + DeleteTensorboardTimeSeriesRequest, + ExportTensorboardTimeSeriesDataRequest, + ExportTensorboardTimeSeriesDataResponse, + GetTensorboardExperimentRequest, + GetTensorboardRequest, + GetTensorboardRunRequest, + GetTensorboardTimeSeriesRequest, + ListTensorboardExperimentsRequest, + ListTensorboardExperimentsResponse, + ListTensorboardRunsRequest, + ListTensorboardRunsResponse, + ListTensorboardsRequest, + ListTensorboardsResponse, + ListTensorboardTimeSeriesRequest, + ListTensorboardTimeSeriesResponse, + ReadTensorboardBlobDataRequest, + ReadTensorboardBlobDataResponse, + ReadTensorboardTimeSeriesDataRequest, + ReadTensorboardTimeSeriesDataResponse, + UpdateTensorboardExperimentRequest, + UpdateTensorboardOperationMetadata, + UpdateTensorboardRequest, + UpdateTensorboardRunRequest, + UpdateTensorboardTimeSeriesRequest, + WriteTensorboardExperimentDataRequest, + WriteTensorboardExperimentDataResponse, + WriteTensorboardRunDataRequest, + WriteTensorboardRunDataResponse, +) +from .tensorboard_time_series import ( + TensorboardTimeSeries, +) +from .training_pipeline import ( + FilterSplit, + FractionSplit, + InputDataConfig, + PredefinedSplit, + StratifiedSplit, + TimestampSplit, + TrainingPipeline, +) +from .types import ( + BoolArray, + DoubleArray, + Int64Array, + StringArray, +) +from .unmanaged_container_model import ( + UnmanagedContainerModel, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) +from .vizier_service import ( + AddTrialMeasurementRequest, + CheckTrialEarlyStoppingStateMetatdata, + CheckTrialEarlyStoppingStateRequest, + CheckTrialEarlyStoppingStateResponse, + CompleteTrialRequest, + CreateStudyRequest, + CreateTrialRequest, + DeleteStudyRequest, + DeleteTrialRequest, + GetStudyRequest, + GetTrialRequest, + ListOptimalTrialsRequest, + 
ListOptimalTrialsResponse, + ListStudiesRequest, + ListStudiesResponse, + ListTrialsRequest, + ListTrialsResponse, + LookupStudyRequest, + StopTrialRequest, + SuggestTrialsMetadata, + SuggestTrialsRequest, + SuggestTrialsResponse, +) + +__all__ = ( + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'PredictRequestResponseLoggingConfig', + 'PrivateEndpoints', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'BlurBaselineConfig', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + 'Explanation', + 'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 'IntegratedGradientsAttribution', 
+ 'ModelExplanation', + 'Neighbor', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 
'ListIndexEndpointsResponse', + 'MutateDeployedIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 
'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'NfsMount', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteArtifactRequest', + 'DeleteContextRequest', + 'DeleteExecutionRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'PurgeArtifactsMetadata', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeContextsMetadata', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeExecutionsMetadata', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 
'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ImportModelEvaluationRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 'ListModelVersionsResponse', + 'MergeVersionAliasesRequest', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineFailurePolicy', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'PipelineTemplateMetadata', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 'RawPredictRequest', + 'SpecialistPool', + 
'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 'UpdateTensorboardTimeSeriesRequest', + 
'WriteTensorboardExperimentDataRequest', + 'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'StratifiedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UnmanagedContainerModel', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py new file mode 100644 index 0000000000..80adfb350f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'AcceleratorType', + }, +) + + +class AcceleratorType(proto.Enum): + r"""Represents a hardware accelerator type.""" + ACCELERATOR_TYPE_UNSPECIFIED = 0 + NVIDIA_TESLA_K80 = 1 + NVIDIA_TESLA_P100 = 2 + NVIDIA_TESLA_V100 = 3 + NVIDIA_TESLA_P4 = 4 + NVIDIA_TESLA_T4 = 5 + NVIDIA_TESLA_A100 = 8 + TPU_V2 = 6 + TPU_V3 = 7 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py new file mode 100644 index 0000000000..a7f0f1cf69 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import user_action_reference +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Annotation', + }, +) + + +class Annotation(proto.Message): + r"""Used to assign specific AnnotationSpec to a particular area + of a DataItem or the whole part of the DataItem. + + Attributes: + name (str): + Output only. Resource name of the Annotation. + payload_schema_uri (str): + Required. 
Google Cloud Storage URI points to a YAML file + describing + [payload][google.cloud.aiplatform.v1.Annotation.payload]. + The schema is defined as an `OpenAPI 3.0.2 Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with the + parent Dataset's + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]. + payload (google.protobuf.struct_pb2.Value): + Required. The schema of the payload can be found in + [payload_schema][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + annotation_source (google.cloud.aiplatform_v1.types.UserActionReference): + Output only. The source of the Annotation. + labels (Mapping[str, str]): + Optional. The labels with user-defined metadata to organize + your Annotations. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Annotation (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Annotation: + + - "aiplatform.googleapis.com/annotation_set_name": + optional, name of the UI's annotation set this Annotation + belongs to. If not set, the Annotation is not visible in + the UI. 
+ + - "aiplatform.googleapis.com/payload_schema": output only, + its value is the + [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] + title. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + payload_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + payload = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + annotation_source = proto.Field( + proto.MESSAGE, + number=5, + message=user_action_reference.UserActionReference, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py new file mode 100644 index 0000000000..909b5b44af --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'AnnotationSpec', + }, +) + + +class AnnotationSpec(proto.Message): + r"""Identifies a concept with which DataItems may be annotated + with. + + Attributes: + name (str): + Output only. Resource name of the + AnnotationSpec. + display_name (str): + Required. The user-defined name of the + AnnotationSpec. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + AnnotationSpec was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when AnnotationSpec + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py new file mode 100644 index 0000000000..81feb44fb6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Artifact', + }, +) + + +class Artifact(proto.Message): + r"""Instance of a general artifact. + + Attributes: + name (str): + Output only. The resource name of the + Artifact. + display_name (str): + User provided display name of the Artifact. + May be up to 128 Unicode characters. + uri (str): + The uniform resource identifier of the + artifact file. May be empty if there is no + actual artifact file. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Artifacts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Artifact (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + last updated. + state (google.cloud.aiplatform_v1.types.Artifact.State): + The state of this Artifact. 
This is a + property of the Artifact, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex AI + Pipelines), and the system does not prescribe or + check the validity of state transitions. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Artifact. + The size of this field should not exceed 200KB. + description (str): + Description of the Artifact + """ + class State(proto.Enum): + r"""Describes the state of the Artifact.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + LIVE = 2 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + uri = proto.Field( + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=13, + enum=State, + ) + schema_title = proto.Field( + proto.STRING, + number=14, + ) + schema_version = proto.Field( + proto.STRING, + number=15, + ) + metadata = proto.Field( + proto.MESSAGE, + number=16, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=17, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff 
--git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py new file mode 100644 index 0000000000..f0b25458a3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import unmanaged_container_model as gca_unmanaged_container_model +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'BatchPredictionJob', + }, +) + + +class BatchPredictionJob(proto.Message): + r"""A job that uses a + 
[Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to + produce predictions on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If predictions for a significant portion of the instances fail, the + job may finish without attempting predictions for all remaining + instances. + + Attributes: + name (str): + Output only. Resource name of the + BatchPredictionJob. + display_name (str): + Required. The user-defined name of this + BatchPredictionJob. + model (str): + The name of the Model resource that produces the predictions + via this job, must share the same ancestor Location. + Starting this job has no impact on any existing deployments + of the Model and their resources. Exactly one of model and + unmanaged_container_model must be set. + + The model resource name may contain version id or version + alias to specify the version, if no version is specified, + the default version will be used. + unmanaged_container_model (google.cloud.aiplatform_v1.types.UnmanagedContainerModel): + Contains model information necessary to perform batch + prediction without requiring uploading to model registry. + Exactly one of model and unmanaged_container_model must be + set. + input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig): + Required. Input configuration of the instances on which + predictions are performed. The schema of any single instance + may be specified via the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + model_parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the predictions. 
The schema of + the parameters may be specified via the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig): + Required. The Configuration specifying where output + predictions should be written. The schema of any single + prediction may be specified as a concatenation of + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. + dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources): + The config of resources used by the Model during the batch + prediction. If the Model + [supports][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types] + DEDICATED_RESOURCES this config may be provided (and the job + will use these resources), if the Model doesn't support + AUTOMATIC_RESOURCES, this config must be provided. + manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters): + Immutable. Parameters configuring the batch behavior. + Currently only applicable when + [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] + are used (in other cases Vertex AI does the tuning itself). + generate_explanation (bool): + Generate explanation with the batch prediction results. 
+ + When set to ``true``, the batch prediction output changes + based on the ``predictions_format`` field of the + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config] + object: + + - ``bigquery``: output includes a column named + ``explanation``. The value is a struct that conforms to + the [Explanation][google.cloud.aiplatform.v1.Explanation] + object. + - ``jsonl``: The JSON objects on each line include an + additional entry keyed ``explanation``. The value of the + entry is a JSON object that conforms to the + [Explanation][google.cloud.aiplatform.v1.Explanation] + object. + - ``csv``: Generating explanations for CSV format is not + supported. + + If this field is set to true, either the + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] + or + [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] + must be populated. + explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): + Explanation configuration for this BatchPredictionJob. Can + be specified only if + [generate_explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] + is set to ``true``. + + This value overrides the value of + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec]. + All fields of + [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] + are optional in the request. If a field of the + [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] + object is not populated, the corresponding field of the + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] + object is inherited. + output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): + Output only. Information further describing + the output of this job. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. 
+ error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + For example, single files that can't be read. + This field never exceeds 20 entries. + Status details fields contain standard GCP error + details. + resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed): + Output only. Information about resources that + had been consumed by this job. Provided in real + time at best effort basis, as well as a final + value once the job completes. + + Note: This field currently may not be populated + for batch predictions that use AutoML Models. + completion_stats (google.cloud.aiplatform_v1.types.CompletionStats): + Output only. Statistics on completed and + failed prediction instances. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob for the first + time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob entered any of + the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was most recently updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize BatchPredictionJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key options for a + BatchPredictionJob. If this is set, then all + resources created by the BatchPredictionJob will + be encrypted with the provided encryption key. + """ + + class InputConfig(proto.Message): + r"""Configures the input to + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + See + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] + for Model's supported input formats, and how instances should be + expressed via any of them. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + The Cloud Storage location for the input + instances. + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): + The BigQuery location of the input table. + The schema of the table should be in the format + described by the given context OpenAPI Schema, + if one is provided. The table may contain + additional columns that are not described by the + schema, and they will be ignored. + + This field is a member of `oneof`_ ``source``. + instances_format (str): + Required. The format in which instances are given, must be + one of the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats]. 
+ """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + instances_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputConfig(proto.Message): + r"""Configures the output of + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + See + [Model.supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats] + for supported output formats, and how predictions are expressed via + any of them. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location of the directory where the output + is to be written to. In the given directory a new directory + is created. Its name is + ``prediction--``, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. + Inside of it files ``predictions_0001.``, + ``predictions_0002.``, ..., + ``predictions_N.`` are created where + ```` depends on chosen + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format], + and N may equal 0001 and depends on the total number of + successfully predicted instances. If the Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then each such file contains predictions as + per the + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format]. 
+ If prediction for any instance failed (partially or + completely), then an additional ``errors_0001.``, + ``errors_0002.``,..., ``errors_N.`` + files are created (N depends on total number of failed + predictions). These files contain the failed instances, as + per their schema, followed by an additional ``error`` field + which as value has [google.rpc.Status][google.rpc.Status] + containing only ``code`` and ``message`` fields. + + This field is a member of `oneof`_ ``destination``. + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + The BigQuery project or dataset location where the output is + to be written to. If project is provided, a new dataset is + created with name + ``prediction__`` where + is made BigQuery-dataset-name compatible (for example, most + special characters become underscores), and timestamp is in + YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and + ``errors``. If the Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then the tables have columns as follows: + The ``predictions`` table contains instances for which the + prediction succeeded, it has columns as per a concatenation + of the Model's instance and prediction schemata. The + ``errors`` table contains rows for which the prediction has + failed, it has instance columns, as per the instance schema, + followed by a single "errors" column, which as values has + [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. + + This field is a member of `oneof`_ ``destination``. + predictions_format (str): + Required. 
The format in which Vertex AI gives the + predictions, must be one of the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.BigQueryDestination, + ) + predictions_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputInfo(proto.Message): + r"""Further describes this job's output. Supplements + [output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + Output only. The full path of the Cloud + Storage directory created, into which the + prediction output is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_dataset (str): + Output only. The path of the BigQuery dataset created, in + ``bq://projectId.bqDatasetId`` format, into which the + prediction output is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_table (str): + Output only. The name of the BigQuery table created, in + ``predictions_`` format, into which the + prediction output is written. Can be used by UI to generate + the BigQuery output path, for example. 
+ """ + + gcs_output_directory = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + bigquery_output_table = proto.Field( + proto.STRING, + number=4, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + unmanaged_container_model = proto.Field( + proto.MESSAGE, + number=28, + message=gca_unmanaged_container_model.UnmanagedContainerModel, + ) + input_config = proto.Field( + proto.MESSAGE, + number=4, + message=InputConfig, + ) + model_parameters = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + output_config = proto.Field( + proto.MESSAGE, + number=6, + message=OutputConfig, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.BatchDedicatedResources, + ) + manual_batch_tuning_parameters = proto.Field( + proto.MESSAGE, + number=8, + message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, + ) + generate_explanation = proto.Field( + proto.BOOL, + number=23, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=25, + message=explanation.ExplanationSpec, + ) + output_info = proto.Field( + proto.MESSAGE, + number=9, + message=OutputInfo, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + error = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=status_pb2.Status, + ) + resources_consumed = proto.Field( + proto.MESSAGE, + number=13, + message=machine_resources.ResourcesConsumed, + ) + completion_stats = proto.Field( + proto.MESSAGE, + number=14, + message=gca_completion_stats.CompletionStats, + ) + create_time = proto.Field( + proto.MESSAGE, + number=15, + message=timestamp_pb2.Timestamp, + 
) + start_time = proto.Field( + proto.MESSAGE, + number=16, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=18, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=19, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py new file mode 100644 index 0000000000..37b529facc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CompletionStats', + }, +) + + +class CompletionStats(proto.Message): + r"""Success and error statistics of processing multiple entities + (for example, DataItems or structured data rows) in batch. + + Attributes: + successful_count (int): + Output only. The number of entities that had + been processed successfully. + failed_count (int): + Output only. The number of entities for which + any error was encountered. 
+ incomplete_count (int): + Output only. In cases when enough errors are + encountered a job, pipeline, or operation may be + failed as a whole. Below is the number of + entities for which the processing had not been + finished (either in successful or failed state). + Set to -1 if the number is unknown (for example, + the operation failed before the total entity + number could be collected). + successful_forecast_point_count (int): + Output only. The number of the successful + forecast points that are generated by the + forecasting model. This is ONLY used by the + forecasting batch prediction. + """ + + successful_count = proto.Field( + proto.INT64, + number=1, + ) + failed_count = proto.Field( + proto.INT64, + number=2, + ) + incomplete_count = proto.Field( + proto.INT64, + number=3, + ) + successful_forecast_point_count = proto.Field( + proto.INT64, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py new file mode 100644 index 0000000000..3eadb11fa5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Context', + }, +) + + +class Context(proto.Message): + r"""Instance of a general context. + + Attributes: + name (str): + Output only. The resource name of the + Context. + display_name (str): + User provided display name of the Context. + May be up to 128 Unicode characters. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Contexts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Context (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + last updated. + parent_contexts (Sequence[str]): + Output only. A list of resource names of Contexts that are + parents of this Context. A Context may have at most 10 + parent_contexts. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. 
And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Context. + The size of this field should not exceed 200KB. + description (str): + Description of the Context + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + parent_contexts = proto.RepeatedField( + proto.STRING, + number=12, + ) + schema_title = proto.Field( + proto.STRING, + number=13, + ) + schema_version = proto.Field( + proto.STRING, + number=14, + ) + metadata = proto.Field( + proto.MESSAGE, + number=15, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py new file mode 100644 index 0000000000..425c5c9302 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CustomJob', + 'CustomJobSpec', + 'WorkerPoolSpec', + 'ContainerSpec', + 'PythonPackageSpec', + 'Scheduling', + }, +) + + +class CustomJob(proto.Message): + r"""Represents a job that runs custom workloads such as a Docker + container or a Python package. A CustomJob can have multiple + worker pools and each worker pool can have its own machine and + input spec. A CustomJob will be cleaned up once the job enters + terminal state (failed or succeeded). + + Attributes: + name (str): + Output only. Resource name of a CustomJob. + display_name (str): + Required. The display name of the CustomJob. + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): + Required. Job spec. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob was + created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob for the first time + entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only.
Time when the CustomJob entered any of the + following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob was most + recently updated. + error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize CustomJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key options for a + CustomJob. If this is set, then all resources + created by the CustomJob will be encrypted with + the provided encryption key. + web_access_uris (Mapping[str, str]): + Output only. URIs for accessing `interactive + shells `__ + (one URI for each training node). Only available if + [job_spec.enable_web_access][google.cloud.aiplatform.v1.CustomJobSpec.enable_web_access] + is ``true``. + + The keys are names of each node in the training job; for + example, ``workerpool0-0`` for the primary node, + ``workerpool1-0`` for the first node in the second worker + pool, and ``workerpool1-1`` for the second node in the + second worker pool. + + The values are the URIs for each node's interactive shell. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + job_spec = proto.Field( + proto.MESSAGE, + number=4, + message='CustomJobSpec', + ) + state = proto.Field( + proto.ENUM, + number=5, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_encryption_spec.EncryptionSpec, + ) + web_access_uris = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + + +class CustomJobSpec(proto.Message): + r"""Represents the spec of a CustomJob. + Next Id: 14 + + Attributes: + worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): + Required. The spec of the worker pools + including machine type and Docker image. All + worker pools except the first one are optional + and can be skipped by providing an empty value. + scheduling (google.cloud.aiplatform_v1.types.Scheduling): + Scheduling options for a CustomJob. + service_account (str): + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this + run-as account. If unspecified, the `Vertex AI Custom Code + Service + Agent `__ + for the CustomJob's project is used. + network (str): + The full name of the Compute Engine + `network `__ + to which the Job should be peered. For example, + ``projects/12345/global/networks/myVPC``. 
+ `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + To specify this field, you must have already `configured VPC + Network Peering for Vertex + AI `__. + + If this field is left unspecified, the job is not peered + with any network. + reserved_ip_ranges (Sequence[str]): + Optional. A list of names for the reserved ip ranges under + the VPC network that can be used for this job. + + If set, we will deploy the job within the provided ip + ranges. Otherwise, the job will be deployed to any ip ranges + under the provided VPC network. + + Example: ['vertex-ai-ip-range']. + base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location to store the output of this + CustomJob or HyperparameterTuningJob. For + HyperparameterTuningJob, the baseOutputDirectory of each + child CustomJob backing a Trial is set to a subdirectory of + name [id][google.cloud.aiplatform.v1.Trial.id] under its + parent HyperparameterTuningJob's baseOutputDirectory. + + The following Vertex AI environment variables will be passed + to containers or python modules when this field is set: + + For CustomJob: + + - AIP_MODEL_DIR = ``/model/`` + - AIP_CHECKPOINT_DIR = + ``/checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``/logs/`` + + For CustomJob backing a Trial of HyperparameterTuningJob: + + - AIP_MODEL_DIR = + ``//model/`` + - AIP_CHECKPOINT_DIR = + ``//checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``//logs/`` + tensorboard (str): + Optional. The name of a Vertex AI + [Tensorboard][google.cloud.aiplatform.v1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + enable_web_access (bool): + Optional. Whether you want Vertex AI to enable `interactive + shell + access `__ + to training containers. 
+ + If set to ``true``, you can access interactive shells at the + URIs given by + [CustomJob.web_access_uris][google.cloud.aiplatform.v1.CustomJob.web_access_uris] + or + [Trial.web_access_uris][google.cloud.aiplatform.v1.Trial.web_access_uris] + (within + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials]). + """ + + worker_pool_specs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkerPoolSpec', + ) + scheduling = proto.Field( + proto.MESSAGE, + number=3, + message='Scheduling', + ) + service_account = proto.Field( + proto.STRING, + number=4, + ) + network = proto.Field( + proto.STRING, + number=5, + ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=13, + ) + base_output_directory = proto.Field( + proto.MESSAGE, + number=6, + message=io.GcsDestination, + ) + tensorboard = proto.Field( + proto.STRING, + number=7, + ) + enable_web_access = proto.Field( + proto.BOOL, + number=10, + ) + + +class WorkerPoolSpec(proto.Message): + r"""Represents the spec of a worker pool in a job. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + container_spec (google.cloud.aiplatform_v1.types.ContainerSpec): + The custom container task. + + This field is a member of `oneof`_ ``task``. + python_package_spec (google.cloud.aiplatform_v1.types.PythonPackageSpec): + The Python packaged task. + + This field is a member of `oneof`_ ``task``. + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Optional. Immutable. The specification of a + single machine. + replica_count (int): + Optional. The number of worker replicas to + use for this worker pool. 
+ nfs_mounts (Sequence[google.cloud.aiplatform_v1.types.NfsMount]): + Optional. List of NFS mount spec. + disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): + Disk spec. + """ + + container_spec = proto.Field( + proto.MESSAGE, + number=6, + oneof='task', + message='ContainerSpec', + ) + python_package_spec = proto.Field( + proto.MESSAGE, + number=7, + oneof='task', + message='PythonPackageSpec', + ) + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message=machine_resources.MachineSpec, + ) + replica_count = proto.Field( + proto.INT64, + number=2, + ) + nfs_mounts = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=machine_resources.NfsMount, + ) + disk_spec = proto.Field( + proto.MESSAGE, + number=5, + message=machine_resources.DiskSpec, + ) + + +class ContainerSpec(proto.Message): + r"""The spec of a Container. + + Attributes: + image_uri (str): + Required. The URI of a container image in the + Container Registry that is to be run on each + worker replica. + command (Sequence[str]): + The command to be invoked when the container + is started. It overrides the entrypoint + instruction in Dockerfile when provided. + args (Sequence[str]): + The arguments to be passed when starting the + container. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Environment variables to be passed to the + container. Maximum limit is 100. + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + + +class PythonPackageSpec(proto.Message): + r"""The spec of a Python packaged code. + + Attributes: + executor_image_uri (str): + Required. The URI of a container image in Artifact Registry + that will run the provided Python package. 
Vertex AI + provides a wide range of executor images with pre-installed + packages to meet users' various use cases. See the list of + `pre-built containers for + training `__. + You must use an image from this list. + package_uris (Sequence[str]): + Required. The Google Cloud Storage location + of the Python package files which are the + training program and its dependent packages. The + maximum number of package URIs is 100. + python_module (str): + Required. The Python module name to run after + installing the packages. + args (Sequence[str]): + Command line arguments to be passed to the + Python task. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Environment variables to be passed to the + python module. Maximum limit is 100. + """ + + executor_image_uri = proto.Field( + proto.STRING, + number=1, + ) + package_uris = proto.RepeatedField( + proto.STRING, + number=2, + ) + python_module = proto.Field( + proto.STRING, + number=3, + ) + args = proto.RepeatedField( + proto.STRING, + number=4, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=env_var.EnvVar, + ) + + +class Scheduling(proto.Message): + r"""All parameters related to queuing and scheduling of custom + jobs. + + Attributes: + timeout (google.protobuf.duration_pb2.Duration): + The maximum job running time. The default is + 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. 
+ """ + + timeout = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + restart_job_on_worker_restart = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py new file mode 100644 index 0000000000..e4b2a1fefa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DataItem', + }, +) + + +class DataItem(proto.Message): + r"""A piece of data in a Dataset. Could be an image, a video, a + document or plain text. + + Attributes: + name (str): + Output only. The resource name of the + DataItem. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + last updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your DataItems. 
+ Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one DataItem(System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + payload (google.protobuf.struct_pb2.Value): + Required. The data that the DataItem represents (for + example, an image or a text snippet). The schema of the + payload is stored in the parent Dataset's [metadata + schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + dataItemSchemaUri field. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + payload = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py new file mode 100644 index 0000000000..e0ed7570ba --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import job_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DataLabelingJob', + 'ActiveLearningConfig', + 'SampleConfig', + 'TrainingConfig', + }, +) + + +class DataLabelingJob(proto.Message): + r"""DataLabelingJob is used to trigger a human labeling job on + unlabeled data from the following Dataset: + + Attributes: + name (str): + Output only. Resource name of the + DataLabelingJob. + display_name (str): + Required. The user-defined name of the + DataLabelingJob. The name can be up to 128 + characters long and can be consist of any UTF-8 + characters. + Display name of a DataLabelingJob. + datasets (Sequence[str]): + Required. Dataset resource names. Right now we only support + labeling from a single Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + annotation_labels (Mapping[str, str]): + Labels to assign to annotations generated by + this DataLabelingJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. 
See https://goo.gl/xmQnxf for more + information and examples of labels. System + reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + labeler_count (int): + Required. Number of labelers to work on each + DataItem. + instruction_uri (str): + Required. The Google Cloud Storage location + of the instruction pdf. This pdf is shared with + labelers, and provides detailed description on + how to label DataItems in Datasets. + inputs_schema_uri (str): + Required. Points to a YAML file stored on + Google Cloud Storage describing the config for a + specific type of DataLabelingJob. The schema + files that can be used here are found in the + https://storage.googleapis.com/google-cloud-aiplatform + bucket in the /schema/datalabelingjob/inputs/ + folder. + inputs (google.protobuf.struct_pb2.Value): + Required. Input config parameters for the + DataLabelingJob. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + labeling_progress (int): + Output only. Current labeling job progress percentage scaled + in interval [0, 100], indicating the percentage of DataItems + that has been finished. + current_spend (google.type.money_pb2.Money): + Output only. Estimated cost(in US dollars) + that the DataLabelingJob has incurred to date. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + DataLabelingJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + DataLabelingJob was updated most recently. + error (google.rpc.status_pb2.Status): + Output only. DataLabelingJob errors. It is only populated + when job's state is ``JOB_STATE_FAILED`` or + ``JOB_STATE_CANCELLED``. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + DataLabelingJobs. 
+ + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each DataLabelingJob: + + - "aiplatform.googleapis.com/schema": output only, its + value is the + [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s + title. + specialist_pools (Sequence[str]): + The SpecialistPools' resource names + associated with this job. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + DataLabelingJob. If set, this DataLabelingJob + will be secured by this key. + Note: Annotations created in the DataLabelingJob + are associated with the EncryptionSpec of the + Dataset they are exported to. + active_learning_config (google.cloud.aiplatform_v1.types.ActiveLearningConfig): + Parameters that configure the active learning + pipeline. Active learning will label the data + incrementally via several iterations. For every + iteration, it will select a batch of data based + on the sampling strategy. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + datasets = proto.RepeatedField( + proto.STRING, + number=3, + ) + annotation_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + labeler_count = proto.Field( + proto.INT32, + number=4, + ) + instruction_uri = proto.Field( + proto.STRING, + number=5, + ) + inputs_schema_uri = proto.Field( + proto.STRING, + number=6, + ) + inputs = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Value, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=job_state.JobState, + ) + labeling_progress = proto.Field( + proto.INT32, + number=13, + ) + current_spend = proto.Field( + proto.MESSAGE, + number=14, + message=money_pb2.Money, + ) + create_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=22, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + specialist_pools = proto.RepeatedField( + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=20, + message=gca_encryption_spec.EncryptionSpec, + ) + active_learning_config = proto.Field( + proto.MESSAGE, + number=21, + message='ActiveLearningConfig', + ) + + +class ActiveLearningConfig(proto.Message): + r"""Parameters that configure the active learning pipeline. + Active learning will label the data incrementally by several + iterations. For every iteration, it will select a batch of data + based on the sampling strategy. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + max_data_item_count (int): + Max number of human labeled DataItems. + + This field is a member of `oneof`_ ``human_labeling_budget``. + max_data_item_percentage (int): + Max percent of total DataItems for human + labeling. + + This field is a member of `oneof`_ ``human_labeling_budget``. + sample_config (google.cloud.aiplatform_v1.types.SampleConfig): + Active learning data sampling config. For + every active learning labeling iteration, it + will select a batch of data based on the + sampling strategy. + training_config (google.cloud.aiplatform_v1.types.TrainingConfig): + CMLE training config. For every active + learning labeling iteration, system will train a + machine learning model on CMLE. The trained + model will be used by data sampling algorithm to + select DataItems. + """ + + max_data_item_count = proto.Field( + proto.INT64, + number=1, + oneof='human_labeling_budget', + ) + max_data_item_percentage = proto.Field( + proto.INT32, + number=2, + oneof='human_labeling_budget', + ) + sample_config = proto.Field( + proto.MESSAGE, + number=3, + message='SampleConfig', + ) + training_config = proto.Field( + proto.MESSAGE, + number=4, + message='TrainingConfig', + ) + + +class SampleConfig(proto.Message): + r"""Active learning data sampling config. For every active + learning labeling iteration, it will select a batch of data + based on the sampling strategy. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + initial_batch_sample_percentage (int): + The percentage of data needed to be labeled + in the first batch. + + This field is a member of `oneof`_ ``initial_batch_sample_size``. + following_batch_sample_percentage (int): + The percentage of data needed to be labeled + in each following batch (except the first + batch). 
+ + This field is a member of `oneof`_ ``following_batch_sample_size``. + sample_strategy (google.cloud.aiplatform_v1.types.SampleConfig.SampleStrategy): + Field to choose sampling strategy. Sampling + strategy will decide which data should be + selected for human labeling in every batch. + """ + class SampleStrategy(proto.Enum): + r"""Sample strategy decides which subset of DataItems should be + selected for human labeling in every batch. + """ + SAMPLE_STRATEGY_UNSPECIFIED = 0 + UNCERTAINTY = 1 + + initial_batch_sample_percentage = proto.Field( + proto.INT32, + number=1, + oneof='initial_batch_sample_size', + ) + following_batch_sample_percentage = proto.Field( + proto.INT32, + number=3, + oneof='following_batch_sample_size', + ) + sample_strategy = proto.Field( + proto.ENUM, + number=5, + enum=SampleStrategy, + ) + + +class TrainingConfig(proto.Message): + r"""CMLE training config. For every active learning labeling + iteration, system will train a machine learning model on CMLE. + The trained model will be used by data sampling algorithm to + select DataItems. + + Attributes: + timeout_training_milli_hours (int): + The timeout hours for the CMLE training job, + expressed in milli hours i.e. 1,000 value in + this field means 1 hour. + """ + + timeout_training_milli_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py new file mode 100644 index 0000000000..cac424012a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Dataset', + 'ImportDataConfig', + 'ExportDataConfig', + }, +) + + +class Dataset(proto.Message): + r"""A collection of DataItems and Annotations on them. + + Attributes: + name (str): + Output only. The resource name of the + Dataset. + display_name (str): + Required. The user-defined name of the + Dataset. The name can be up to 128 characters + long and can be consist of any UTF-8 characters. + description (str): + Optional. The description of the Dataset. + metadata_schema_uri (str): + Required. Points to a YAML file stored on + Google Cloud Storage describing additional + information about the Dataset. The schema is + defined as an OpenAPI 3.0.2 Schema Object. The + schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/metadata/. + metadata (google.protobuf.struct_pb2.Value): + Required. Additional information about the + Dataset. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + last updated. + etag (str): + Used to perform consistent read-modify-write + updates. 
If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + output only, its value is the + [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + title. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Dataset. If set, this Dataset and all + sub-resources of this Dataset will be secured by + this key. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + metadata = proto.Field( + proto.MESSAGE, + number=8, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=6, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=11, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class ImportDataConfig(proto.Message): + r"""Describes the location from where we import data into a + Dataset, together with the labels that will be applied to the + DataItems and the Annotations. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + The Google Cloud Storage location for the + input content. + + This field is a member of `oneof`_ ``source``. + data_item_labels (Mapping[str, str]): + Labels that will be applied to newly imported DataItems. If + an identical DataItem as one being imported already exists + in the Dataset, then these labels will be appended to these + of the already existing one, and if labels with identical + key is imported before, the old label value will be + overwritten. If two DataItems are identical in the same + import data operation, the labels will be combined and if + key collision happens in this case, one of the values will + be picked randomly. Two DataItems are considered identical + if their content bytes are identical (e.g. image bytes or + pdf bytes). 
These labels will be overridden by Annotation + labels specified inside index file referenced by + [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri], + e.g. jsonl file. + import_schema_uri (str): + Required. Points to a YAML file stored on Google Cloud + Storage describing the import format. Validation will be + done against the schema. The schema is defined as an + `OpenAPI 3.0.2 Schema + Object `__. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message=io.GcsSource, + ) + data_item_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + import_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + + +class ExportDataConfig(proto.Message): + r"""Describes what part of the Dataset is to be exported, the + destination of the export and how to export. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Google Cloud Storage location where the output is to be + written to. In the given directory a new directory will be + created with name: + ``export-data--`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All export output will be written into that + directory. Inside that directory, annotations with the same + schema will be grouped into sub directories which are named + with the corresponding annotations' schema title. Inside + these sub directories, a schema.yaml will be created to + describe the output format. + + This field is a member of `oneof`_ ``destination``. + annotations_filter (str): + A filter on Annotations of the Dataset. Only Annotations on + to-be-exported DataItems(specified by [data_items_filter][]) + that match this filter will be exported. The filter syntax + is the same as in + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. 
+ """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.GcsDestination, + ) + annotations_filter = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py new file mode 100644 index 0000000000..65c006687b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py @@ -0,0 +1,543 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateDatasetRequest', + 'CreateDatasetOperationMetadata', + 'GetDatasetRequest', + 'UpdateDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ImportDataResponse', + 'ImportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'ExportDataOperationMetadata', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'GetAnnotationSpecRequest', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Dataset in. Format: + ``projects/{project}/locations/{location}`` + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. + + Attributes: + name (str): + Required. The name of the Dataset resource. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + + Attributes: + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + """ + + dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Attributes: + parent (str): + Required. The name of the Dataset's parent resource. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. 
+ + - ``display_name``: supports = and != + - ``metadata_schema_uri``: supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Attributes: + datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]): + A list of Datasets that matches the specified + filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + datasets = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. 
+ + Attributes: + name (str): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): + Required. The desired input locations. The + contents of all input locations will be imported + in one batch. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + import_configs = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gca_dataset.ImportDataConfig, + ) + + +class ImportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + + """ + + +class ImportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): + Required. The desired output location. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + export_config = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.ExportDataConfig, + ) + + +class ExportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + exported_files (Sequence[str]): + All of the files that are exported in this + export operation. + """ + + exported_files = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ExportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + gcs_output_directory (str): + A Google Cloud Storage directory which path + ends with '/'. The exported data is stored in + the directory. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + +class ListDataItemsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Attributes: + parent (str): + Required. The resource name of the Dataset to list DataItems + from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataItemsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Attributes: + data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]): + A list of DataItems that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_items = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=data_item.DataItem, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The name of the AnnotationSpec resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListAnnotationsRequest(proto.Message): + r"""Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Attributes: + parent (str): + Required. The resource name of the DataItem to list + Annotations from. 
Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListAnnotationsResponse(proto.Message): + r"""Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Attributes: + annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]): + A list of Annotations that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. 
+ """ + + @property + def raw_page(self): + return self + + annotations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation.Annotation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py new file mode 100644 index 0000000000..99c0afde39 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DeployedIndexRef', + }, +) + + +class DeployedIndexRef(proto.Message): + r"""Points to a DeployedIndex. + + Attributes: + index_endpoint (str): + Immutable. A resource name of the + IndexEndpoint. + deployed_index_id (str): + Immutable. The ID of the DeployedIndex in the + above IndexEndpoint. 
+ """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py new file mode 100644 index 0000000000..6b79b46dbc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DeployedModelRef', + }, +) + + +class DeployedModelRef(proto.Message): + r"""Points to a DeployedModel. + + Attributes: + endpoint (str): + Immutable. A resource name of an Endpoint. + deployed_model_id (str): + Immutable. An ID of a DeployedModel in the + above Endpoint. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py new file mode 100644 index 0000000000..8e1a552ff7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'EncryptionSpec', + }, +) + + +class EncryptionSpec(proto.Message): + r"""Represents a customer-managed encryption key spec that can be + applied to a top-level resource. + + Attributes: + kms_key_name (str): + Required. The Cloud KMS resource identifier of the customer + managed encryption key used to protect a resource. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. 
+ """ + + kms_key_name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py new file mode 100644 index 0000000000..efd0134dbf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py @@ -0,0 +1,422 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Endpoint', + 'DeployedModel', + 'PrivateEndpoints', + 'PredictRequestResponseLoggingConfig', + }, +) + + +class Endpoint(proto.Message): + r"""Models are deployed into it, and afterwards Endpoint is + called to obtain predictions and explanations. + + Attributes: + name (str): + Output only. The resource name of the + Endpoint. + display_name (str): + Required. The display name of the Endpoint. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. 
+ description (str): + The description of the Endpoint. + deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]): + Output only. The models deployed in this Endpoint. To add or + remove DeployedModels use + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel] + and + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel] + respectively. + traffic_split (Mapping[str, int]): + A map from a DeployedModel's ID to the + percentage of this Endpoint's traffic that + should be forwarded to that DeployedModel. + If a DeployedModel's ID is not listed in this + map, then it receives no traffic. + + The traffic percentage values must add up to + 100, or map must be empty if the Endpoint is to + not accept any traffic at a moment. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Endpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Endpoint was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Endpoint was + last updated. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for an + Endpoint. If set, this Endpoint and all + sub-resources of this Endpoint will be secured + by this key. + network (str): + The full name of the Google Compute Engine + `network `__ + to which the Endpoint should be peered. + + Private services access must already be configured for the + network. 
If left unspecified, the Endpoint is not peered + with any network. + + Only one of the fields, + [network][google.cloud.aiplatform.v1.Endpoint.network] or + [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], + can be set. + + `Format `__: + ``projects/{project}/global/networks/{network}``. Where + ``{project}`` is a project number, as in ``12345``, and + ``{network}`` is network name. + enable_private_service_connect (bool): + Deprecated: If true, expose the Endpoint via private service + connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1.Endpoint.network] or + [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], + can be set. + model_deployment_monitoring_job (str): + Output only. Resource name of the Model Monitoring job + associated with this Endpoint if monitoring is enabled by + [CreateModelDeploymentMonitoringJob][]. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + predict_request_response_logging_config (google.cloud.aiplatform_v1.types.PredictRequestResponseLoggingConfig): + Configures the request-response logging for + online prediction. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='DeployedModel', + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=5, + ) + etag = proto.Field( + proto.STRING, + number=6, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + create_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=10, + message=gca_encryption_spec.EncryptionSpec, + ) + network = proto.Field( + proto.STRING, + number=13, + ) + enable_private_service_connect = proto.Field( + proto.BOOL, + number=17, + ) + model_deployment_monitoring_job = proto.Field( + proto.STRING, + number=14, + ) + predict_request_response_logging_config = proto.Field( + proto.MESSAGE, + number=18, + message='PredictRequestResponseLoggingConfig', + ) + + +class DeployedModel(proto.Message): + r"""A deployment of a Model. Endpoints contain one or more + DeployedModels. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources): + A description of resources that are dedicated + to the DeployedModel, and that need a higher + degree of manual configuration. + + This field is a member of `oneof`_ ``prediction_resources``. 
+ automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources): + A description of resources that to large + degree are decided by Vertex AI, and require + only a modest additional configuration. + + This field is a member of `oneof`_ ``prediction_resources``. + id (str): + Immutable. The ID of the DeployedModel. If not provided upon + deployment, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. + model (str): + Required. The resource name of the Model that + this is the deployment of. Note that the Model + may be in a different location than the + DeployedModel's Endpoint. + The resource name may contain version id or + version alias to specify the version, if no + version is specified, the default version will + be deployed. + display_name (str): + The display name of the DeployedModel. If not provided upon + creation, the Model's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedModel + was created. + explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): + Explanation configuration for this DeployedModel. + + When deploying a Model using + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel], + this value overrides the value of + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec]. + All fields of + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + are optional in the request. If a field of + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + is not populated, the value of the same field of + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] + is inherited. 
If the corresponding + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] + is not populated, all fields of the + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + will be used for the explanation configuration. + service_account (str): + The service account that the DeployedModel's container runs + as. Specify the email address of the service account. If + this service account is not specified, the container runs as + a service account that doesn't have access to the resource + project. + + Users deploying the Model must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + disable_container_logging (bool): + For custom-trained Models and AutoML Tabular Models, the + container of the DeployedModel instances will send + ``stderr`` and ``stdout`` streams to Stackdriver Logging by + default. Please note that the logs incur cost, which are + subject to `Cloud Logging + pricing `__. + + User can disable container logging by setting this flag to + true. + enable_access_logging (bool): + These logs are like standard server access + logs, containing information like timestamp and + latency for each prediction request. + Note that Stackdriver logs may incur a cost, + especially if your project receives prediction + requests at a high queries per second rate + (QPS). Estimate your costs before enabling this + option. + private_endpoints (google.cloud.aiplatform_v1.types.PrivateEndpoints): + Output only. Provide paths for users to send + predict/explain/health requests directly to the deployed + model services running on Cloud via private services access. + This field is populated if + [network][google.cloud.aiplatform.v1.Endpoint.network] is + configured. 
+ """ + + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + oneof='prediction_resources', + message=machine_resources.DedicatedResources, + ) + automatic_resources = proto.Field( + proto.MESSAGE, + number=8, + oneof='prediction_resources', + message=machine_resources.AutomaticResources, + ) + id = proto.Field( + proto.STRING, + number=1, + ) + model = proto.Field( + proto.STRING, + number=2, + ) + display_name = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=9, + message=explanation.ExplanationSpec, + ) + service_account = proto.Field( + proto.STRING, + number=11, + ) + disable_container_logging = proto.Field( + proto.BOOL, + number=15, + ) + enable_access_logging = proto.Field( + proto.BOOL, + number=13, + ) + private_endpoints = proto.Field( + proto.MESSAGE, + number=14, + message='PrivateEndpoints', + ) + + +class PrivateEndpoints(proto.Message): + r"""PrivateEndpoints proto is used to provide paths for users to send + requests privately. To send request via private service access, use + predict_http_uri, explain_http_uri or health_http_uri. To send + request via private service connect, use service_attachment. + + Attributes: + predict_http_uri (str): + Output only. Http(s) path to send prediction + requests. + explain_http_uri (str): + Output only. Http(s) path to send explain + requests. + health_http_uri (str): + Output only. Http(s) path to send health + check requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
+ """ + + predict_http_uri = proto.Field( + proto.STRING, + number=1, + ) + explain_http_uri = proto.Field( + proto.STRING, + number=2, + ) + health_http_uri = proto.Field( + proto.STRING, + number=3, + ) + service_attachment = proto.Field( + proto.STRING, + number=4, + ) + + +class PredictRequestResponseLoggingConfig(proto.Message): + r"""Configuration for logging request-response to a BigQuery + table. + + Attributes: + enabled (bool): + If logging is enabled or not. + sampling_rate (float): + Percentage of requests to be logged, expressed as a fraction + in range(0,1]. + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + BigQuery table for logging. If only given a project, a new + dataset will be created with name + ``logging__`` where will + be made BigQuery-dataset-name compatible (e.g. most special + characters will become underscores). If no table name is + given, a new table will be created with name + ``request_response_logging`` + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + sampling_rate = proto.Field( + proto.DOUBLE, + number=2, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.BigQueryDestination, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py new file mode 100644 index 0000000000..eba8d37f59 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -0,0 +1,409 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateEndpointRequest', + 'CreateEndpointOperationMetadata', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UpdateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelRequest', + 'DeployModelResponse', + 'DeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UndeployModelOperationMetadata', + }, +) + + +class CreateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Endpoint in. Format: + ``projects/{project}/locations/{location}`` + endpoint (google.cloud.aiplatform_v1.types.Endpoint): + Required. The Endpoint to create. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become the + final component of the endpoint resource name. If not + provided, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. When using HTTP/JSON, this field is populated + based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields that + are not included in either the URI or the body. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.Endpoint, + ) + endpoint_id = proto.Field( + proto.STRING, + number=4, + ) + + +class CreateEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] + + Attributes: + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEndpointsRequest(proto.Message): + r"""Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``endpoint`` supports = and !=. ``endpoint`` represents + the Endpoint ID, i.e. the last segment of the Endpoint's + [resource + name][google.cloud.aiplatform.v1.Endpoint.name]. + - ``display_name`` supports = and, != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. 
+ + Some examples: + + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListEndpointsResponse.next_page_token] + of the previous + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListEndpointsResponse(proto.Message): + r"""Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + + Attributes: + endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]): + List of Endpoints in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListEndpointsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + + Attributes: + endpoint (google.cloud.aiplatform_v1.types.Endpoint): + Required. The Endpoint which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. + + Attributes: + name (str): + Required. The name of the Endpoint resource to be deleted. + Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource into which to + deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. 
Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + traffic_split (Mapping[str, int]): + A map from a DeployedModel's ID to the percentage of this + Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the just + being deployed Model, a "0" should be used, and the actual + ID of the new DeployedModel will be filled in its place by + this method. The traffic percentage values must add up to + 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + is not updated. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.DeployedModel, + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + + +class DeployModelResponse(proto.Message): + r"""Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + + Attributes: + deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): + The DeployedModel that had been deployed in + the Endpoint. + """ + + deployed_model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.DeployedModel, + ) + + +class DeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource from which to + undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model_id (str): + Required. The ID of the DeployedModel to be + undeployed from the Endpoint. + traffic_split (Mapping[str, int]): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is being + undeployed from the Endpoint, the [Endpoint.traffic_split] + will always end up empty when this call returns. A + DeployedModel will be successfully undeployed only if it + doesn't have any traffic assigned to it when this method + executes, or if this field unassigns any traffic to it. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + + +class UndeployModelResponse(proto.Message): + r"""Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + """ + + +class UndeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py new file mode 100644 index 0000000000..c642aa2a59 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import featurestore_monitoring +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'EntityType', + }, +) + + +class EntityType(proto.Message): + r"""An entity type is a type of object in a system that needs to + be modeled and have stored information about. For example, + driver is an entity type, and driver0 is an instance of an + entity type driver. + + Attributes: + name (str): + Immutable. Name of the EntityType. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + The last part entity_type is assigned by the client. 
The + entity_type can be up to 64 characters long and can consist + only of ASCII Latin letters A-Z and a-z and underscore(_), + and ASCII digits 0-9 starting with a letter. The value will + be unique given a featurestore. + description (str): + Optional. Description of the EntityType. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your EntityTypes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one EntityType + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Optional. Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + monitoring_config (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig): + Optional. The default monitoring configuration for all + Features with value type + ([Feature.ValueType][google.cloud.aiplatform.v1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 under this EntityType. + + If this is populated with + [FeaturestoreMonitoringConfig.monitoring_interval] + specified, snapshot analysis monitoring is enabled. + Otherwise, snapshot analysis monitoring is disabled. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + monitoring_config = proto.Field( + proto.MESSAGE, + number=8, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py new file mode 100644 index 0000000000..98498f188d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'EnvVar', + }, +) + + +class EnvVar(proto.Message): + r"""Represents an environment variable present in a Container or + Python Module. + + Attributes: + name (str): + Required. Name of the environment variable. + Must be a valid C identifier. + value (str): + Required. 
Variables that reference a $(VAR_NAME) are + expanded using the previous defined environment variables in + the container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped + references will never be expanded, regardless of whether the + variable exists or not. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py new file mode 100644 index 0000000000..3ad3beff64 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Event', + }, +) + + +class Event(proto.Message): + r"""An edge describing the relationship between an Artifact and + an Execution in a lineage graph. + + Attributes: + artifact (str): + Required. The relative resource name of the + Artifact in the Event. + execution (str): + Output only. 
The relative resource name of + the Execution in the Event. + event_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time the Event occurred. + type_ (google.cloud.aiplatform_v1.types.Event.Type): + Required. The type of the Event. + labels (Mapping[str, str]): + The labels with user-defined metadata to + annotate Events. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Event (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + """ + class Type(proto.Enum): + r"""Describes whether an Event's Artifact is the Execution's + input or output. + """ + TYPE_UNSPECIFIED = 0 + INPUT = 1 + OUTPUT = 2 + + artifact = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.STRING, + number=2, + ) + event_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + type_ = proto.Field( + proto.ENUM, + number=4, + enum=Type, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py new file mode 100644 index 0000000000..25203a9204 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Execution', + }, +) + + +class Execution(proto.Message): + r"""Instance of a general execution. + + Attributes: + name (str): + Output only. The resource name of the + Execution. + display_name (str): + User provided display name of the Execution. + May be up to 128 Unicode characters. + state (google.cloud.aiplatform_v1.types.Execution.State): + The state of this Execution. This is a + property of the Execution, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex AI Pipelines) + and the system does not prescribe or check the + validity of state transitions. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Executions. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Execution (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Timestamp when this Execution + was last updated. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in ``schema_title`` to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Execution. + The size of this field should not exceed 200KB. + description (str): + Description of the Execution + """ + class State(proto.Enum): + r"""Describes the state of the Execution.""" + STATE_UNSPECIFIED = 0 + NEW = 1 + RUNNING = 2 + COMPLETE = 3 + FAILED = 4 + CACHED = 5 + CANCELLED = 6 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + schema_title = proto.Field( + proto.STRING, + number=13, + ) + schema_version = proto.Field( + proto.STRING, + number=14, + ) + metadata = proto.Field( + proto.MESSAGE, + number=15, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py new file mode 100644 index 
0000000000..eb6eb1c598 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py @@ -0,0 +1,838 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import explanation_metadata +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Explanation', + 'ModelExplanation', + 'Attribution', + 'Neighbor', + 'ExplanationSpec', + 'ExplanationParameters', + 'SampledShapleyAttribution', + 'IntegratedGradientsAttribution', + 'XraiAttribution', + 'SmoothGradConfig', + 'FeatureNoiseSigma', + 'BlurBaselineConfig', + 'ExplanationSpecOverride', + 'ExplanationMetadataOverride', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + }, +) + + +class Explanation(proto.Message): + r"""Explanation of a prediction (provided in + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]) + produced by the Model on a given + [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. + + Attributes: + attributions (Sequence[google.cloud.aiplatform_v1.types.Attribution]): + Output only. Feature attributions grouped by predicted + outputs. + + For Models that predict only one output, such as regression + Models that predict only one score, there is only one + attibution that explains the predicted output. 
For Models + that predict multiple outputs, such as multiclass Models + that predict multiple classes, each element explains one + specific item. + [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] + can be used to identify which output this attribution is + explaining. + + If users set + [ExplanationParameters.top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k], + the attributions are sorted by + [instance_output_value][Attributions.instance_output_value] + in descending order. If + [ExplanationParameters.output_indices][google.cloud.aiplatform.v1.ExplanationParameters.output_indices] + is specified, the attributions are stored by + [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] + in the same order as they appear in the output_indices. + neighbors (Sequence[google.cloud.aiplatform_v1.types.Neighbor]): + Output only. List of the nearest neighbors + for example-based explanations. + For models deployed with the examples + explanations feature enabled, the attributions + field is empty and instead the neighbors field + is populated. + """ + + attributions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Attribution', + ) + neighbors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='Neighbor', + ) + + +class ModelExplanation(proto.Message): + r"""Aggregated explanation metrics for a Model over a set of + instances. + + Attributes: + mean_attributions (Sequence[google.cloud.aiplatform_v1.types.Attribution]): + Output only. Aggregated attributions explaining the Model's + prediction outputs over the set of instances. The + attributions are grouped by outputs. + + For Models that predict only one output, such as regression + Models that predict only one score, there is only one + attibution that explains the predicted output. For Models + that predict multiple outputs, such as multiclass Models + that predict multiple classes, each element explains one + specific item. 
+ [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] + can be used to identify which output this attribution is + explaining. + + The + [baselineOutputValue][google.cloud.aiplatform.v1.Attribution.baseline_output_value], + [instanceOutputValue][google.cloud.aiplatform.v1.Attribution.instance_output_value] + and + [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] + fields are averaged over the test data. + + NOTE: Currently AutoML tabular classification Models produce + only one attribution, which averages attributions over all + the classes it predicts. + [Attribution.approximation_error][google.cloud.aiplatform.v1.Attribution.approximation_error] + is not populated. + """ + + mean_attributions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Attribution', + ) + + +class Attribution(proto.Message): + r"""Attribution that explains a particular prediction output. + + Attributes: + baseline_output_value (float): + Output only. Model predicted output if the input instance is + constructed from the baselines of all the features defined + in + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. + The field name of the output is determined by the key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. + + If the Model's predicted output has multiple dimensions + (rank > 1), this is the value in the output located by + [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. + + If there are multiple baselines, their output values are + averaged. + instance_output_value (float): + Output only. Model predicted output on the corresponding + [explanation instance][ExplainRequest.instances]. The field + name of the output is determined by the key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. 
+ + If the Model predicted output has multiple dimensions, this + is the value in the output located by + [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. + feature_attributions (google.protobuf.struct_pb2.Value): + Output only. Attributions of each explained feature. + Features are extracted from the [prediction + instances][google.cloud.aiplatform.v1.ExplainRequest.instances] + according to [explanation metadata for + inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. + + The value is a struct, whose keys are the name of the + feature. The values are how much the feature in the + [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] + contributed to the predicted result. + + The format of the value is determined by the feature's input + format: + + - If the feature is a scalar value, the attribution value + is a [floating + number][google.protobuf.Value.number_value]. + + - If the feature is an array of scalar values, the + attribution value is an + [array][google.protobuf.Value.list_value]. + + - If the feature is a struct, the attribution value is a + [struct][google.protobuf.Value.struct_value]. The keys in + the attribution value struct are the same as the keys in + the feature struct. The formats of the values in the + attribution struct are determined by the formats of the + values in the feature struct. + + The + [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] + field, pointed to by the + [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] + field of the + [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + object, points to the schema file that describes the + features and their attribution values (if it is populated). + output_index (Sequence[int]): + Output only. The index that locates the explained prediction + output. 
+ + If the prediction output is a scalar value, output_index is + not populated. If the prediction output has multiple + dimensions, the length of the output_index list is the same + as the number of dimensions of the output. The i-th element + in output_index is the element index of the i-th dimension + of the output vector. Indices start from 0. + output_display_name (str): + Output only. The display name of the output identified by + [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. + For example, the predicted class name by a + multi-classification Model. + + This field is only populated iff the Model predicts display + names as a separate field along with the explained output. + The predicted display name must has the same shape of the + explained output, and can be located using output_index. + approximation_error (float): + Output only. Error of + [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] + caused by approximation used in the explanation method. + Lower value means more precise attributions. + + - For Sampled Shapley + [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution], + increasing + [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] + might reduce the error. + - For Integrated Gradients + [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], + increasing + [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] + might reduce the error. + - For [XRAI + attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], + increasing + [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] + might reduce the error. + + See `this + introduction `__ + for more information. + output_name (str): + Output only. Name of the explain output. 
Specified as the + key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. + """ + + baseline_output_value = proto.Field( + proto.DOUBLE, + number=1, + ) + instance_output_value = proto.Field( + proto.DOUBLE, + number=2, + ) + feature_attributions = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + output_index = proto.RepeatedField( + proto.INT32, + number=4, + ) + output_display_name = proto.Field( + proto.STRING, + number=5, + ) + approximation_error = proto.Field( + proto.DOUBLE, + number=6, + ) + output_name = proto.Field( + proto.STRING, + number=7, + ) + + +class Neighbor(proto.Message): + r"""Neighbors for example-based explanations. + + Attributes: + neighbor_id (str): + Output only. The neighbor id. + neighbor_distance (float): + Output only. The neighbor distance. + """ + + neighbor_id = proto.Field( + proto.STRING, + number=1, + ) + neighbor_distance = proto.Field( + proto.DOUBLE, + number=2, + ) + + +class ExplanationSpec(proto.Message): + r"""Specification of Model explanation. + + Attributes: + parameters (google.cloud.aiplatform_v1.types.ExplanationParameters): + Required. Parameters that configure + explaining of the Model's predictions. + metadata (google.cloud.aiplatform_v1.types.ExplanationMetadata): + Required. Metadata describing the Model's + input and output for explanation. + """ + + parameters = proto.Field( + proto.MESSAGE, + number=1, + message='ExplanationParameters', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message=explanation_metadata.ExplanationMetadata, + ) + + +class ExplanationParameters(proto.Message): + r"""Parameters to configure explaining for Model's predictions. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + sampled_shapley_attribution (google.cloud.aiplatform_v1.types.SampledShapleyAttribution): + An attribution method that approximates + Shapley values for features that contribute to + the label being predicted. A sampling strategy + is used to approximate the value rather than + considering all subsets of features. Refer to + this paper for model details: + https://arxiv.org/abs/1306.4265. + + This field is a member of `oneof`_ ``method``. + integrated_gradients_attribution (google.cloud.aiplatform_v1.types.IntegratedGradientsAttribution): + An attribution method that computes + Aumann-Shapley values taking advantage of the + model's fully differentiable structure. Refer to + this paper for more details: + https://arxiv.org/abs/1703.01365 + + This field is a member of `oneof`_ ``method``. + xrai_attribution (google.cloud.aiplatform_v1.types.XraiAttribution): + An attribution method that redistributes + Integrated Gradients attribution to segmented + regions, taking advantage of the model's fully + differentiable structure. Refer to this paper + for more details: + https://arxiv.org/abs/1906.02825 + XRAI currently performs better on natural + images, like a picture of a house or an animal. + If the images are taken in artificial + environments, like a lab or manufacturing line, + or from diagnostic equipment, like x-rays or + quality-control cameras, use Integrated + Gradients instead. + + This field is a member of `oneof`_ ``method``. + top_k (int): + If populated, returns attributions for top K + indices of outputs (defaults to 1). Only applies + to Models that predicts more than one outputs + (e,g, multi-class Models). When set to -1, + returns explanations for all outputs. 
+ output_indices (google.protobuf.struct_pb2.ListValue): + If populated, only returns attributions that have + [output_index][google.cloud.aiplatform.v1.Attribution.output_index] + contained in output_indices. It must be an ndarray of + integers, with the same shape of the output it's explaining. + + If not populated, returns attributions for + [top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k] + indices of outputs. If neither top_k nor output_indeices is + populated, returns the argmax index of the outputs. + + Only applicable to Models that predict multiple outputs + (e,g, multi-class Models that predict multiple classes). + """ + + sampled_shapley_attribution = proto.Field( + proto.MESSAGE, + number=1, + oneof='method', + message='SampledShapleyAttribution', + ) + integrated_gradients_attribution = proto.Field( + proto.MESSAGE, + number=2, + oneof='method', + message='IntegratedGradientsAttribution', + ) + xrai_attribution = proto.Field( + proto.MESSAGE, + number=3, + oneof='method', + message='XraiAttribution', + ) + top_k = proto.Field( + proto.INT32, + number=4, + ) + output_indices = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.ListValue, + ) + + +class SampledShapleyAttribution(proto.Message): + r"""An attribution method that approximates Shapley values for + features that contribute to the label being predicted. A + sampling strategy is used to approximate the value rather than + considering all subsets of features. + + Attributes: + path_count (int): + Required. The number of feature permutations to consider + when approximating the Shapley values. + + Valid range of its value is [1, 50], inclusively. + """ + + path_count = proto.Field( + proto.INT32, + number=1, + ) + + +class IntegratedGradientsAttribution(proto.Message): + r"""An attribution method that computes the Aumann-Shapley value + taking advantage of the model's fully differentiable structure. 
+ Refer to this paper for more details: + https://arxiv.org/abs/1703.01365 + + Attributes: + step_count (int): + Required. The number of steps for approximating the path + integral. A good value to start is 50 and gradually increase + until the sum to diff property is within the desired error + range. + + Valid range of its value is [1, 100], inclusively. + smooth_grad_config (google.cloud.aiplatform_v1.types.SmoothGradConfig): + Config for SmoothGrad approximation of + gradients. + When enabled, the gradients are approximated by + averaging the gradients from noisy samples in + the vicinity of the inputs. Adding noise can + help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): + Config for IG with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 + """ + + step_count = proto.Field( + proto.INT32, + number=1, + ) + smooth_grad_config = proto.Field( + proto.MESSAGE, + number=2, + message='SmoothGradConfig', + ) + blur_baseline_config = proto.Field( + proto.MESSAGE, + number=3, + message='BlurBaselineConfig', + ) + + +class XraiAttribution(proto.Message): + r"""An explanation method that redistributes Integrated Gradients + attributions to segmented regions, taking advantage of the + model's fully differentiable structure. Refer to this paper for + more details: https://arxiv.org/abs/1906.02825 + + Supported only by image Models. + + Attributes: + step_count (int): + Required. The number of steps for approximating the path + integral. A good value to start is 50 and gradually increase + until the sum to diff property is met within the desired + error range. + + Valid range of its value is [1, 100], inclusively. 
+ smooth_grad_config (google.cloud.aiplatform_v1.types.SmoothGradConfig): + Config for SmoothGrad approximation of + gradients. + When enabled, the gradients are approximated by + averaging the gradients from noisy samples in + the vicinity of the inputs. Adding noise can + help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): + Config for XRAI with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 + """ + + step_count = proto.Field( + proto.INT32, + number=1, + ) + smooth_grad_config = proto.Field( + proto.MESSAGE, + number=2, + message='SmoothGradConfig', + ) + blur_baseline_config = proto.Field( + proto.MESSAGE, + number=3, + message='BlurBaselineConfig', + ) + + +class SmoothGradConfig(proto.Message): + r"""Config for SmoothGrad approximation of gradients. + When enabled, the gradients are approximated by averaging the + gradients from noisy samples in the vicinity of the inputs. + Adding noise can help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + noise_sigma (float): + This is a single float value and will be used to add noise + to all the features. 
Use this field when all features are + normalized to have the same distribution: scale to range [0, + 1], [-1, 1] or z-scoring, where features are normalized to + have 0-mean and 1-variance. Learn more about + `normalization `__. + + For best results the recommended value is about 10% - 20% of + the standard deviation of the input feature. Refer to + section 3.2 of the SmoothGrad paper: + https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. + + If the distribution is different per feature, set + [feature_noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.feature_noise_sigma] + instead for each feature. + + This field is a member of `oneof`_ ``GradientNoiseSigma``. + feature_noise_sigma (google.cloud.aiplatform_v1.types.FeatureNoiseSigma): + This is similar to + [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma], + but provides additional flexibility. A separate noise sigma + can be provided for each feature, which is useful if their + distributions are different. No noise is added to features + that are not set. If this field is unset, + [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] + will be used for all features. + + This field is a member of `oneof`_ ``GradientNoiseSigma``. + noisy_sample_count (int): + The number of gradient samples to use for approximation. The + higher this number, the more accurate the gradient is, but + the runtime complexity increases by this factor as well. + Valid range of its value is [1, 50]. Defaults to 3. + """ + + noise_sigma = proto.Field( + proto.FLOAT, + number=1, + oneof='GradientNoiseSigma', + ) + feature_noise_sigma = proto.Field( + proto.MESSAGE, + number=2, + oneof='GradientNoiseSigma', + message='FeatureNoiseSigma', + ) + noisy_sample_count = proto.Field( + proto.INT32, + number=3, + ) + + +class FeatureNoiseSigma(proto.Message): + r"""Noise sigma by features. 
Noise sigma represents the standard + deviation of the gaussian kernel that will be used to add noise + to interpolated inputs prior to computing gradients. + + Attributes: + noise_sigma (Sequence[google.cloud.aiplatform_v1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): + Noise sigma per feature. No noise is added to + features that are not set. + """ + + class NoiseSigmaForFeature(proto.Message): + r"""Noise sigma for a single feature. + + Attributes: + name (str): + The name of the input feature for which noise sigma is + provided. The features are defined in [explanation metadata + inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. + sigma (float): + This represents the standard deviation of the Gaussian + kernel that will be used to add noise to the feature prior + to computing gradients. Similar to + [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] + but represents the noise added to the current feature. + Defaults to 0.1. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + sigma = proto.Field( + proto.FLOAT, + number=2, + ) + + noise_sigma = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=NoiseSigmaForFeature, + ) + + +class BlurBaselineConfig(proto.Message): + r"""Config for blur baseline. + When enabled, a linear path from the maximally blurred image to + the input image is created. Using a blurred baseline instead of + zero (black image) is motivated by the BlurIG approach explained + here: + https://arxiv.org/abs/2004.03383 + + Attributes: + max_blur_sigma (float): + The standard deviation of the blur kernel for + the blurred baseline. The same blurring + parameter is used for both the height and the + width dimension. If not set, the method defaults + to the zero (i.e. black for images) baseline. 
+ """ + + max_blur_sigma = proto.Field( + proto.FLOAT, + number=1, + ) + + +class ExplanationSpecOverride(proto.Message): + r"""The [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] + entries that can be overridden at [online + explanation][google.cloud.aiplatform.v1.PredictionService.Explain] + time. + + Attributes: + parameters (google.cloud.aiplatform_v1.types.ExplanationParameters): + The parameters to be overridden. Note that the + [method][google.cloud.aiplatform.v1.ExplanationParameters.method] + cannot be changed. If not specified, no parameter is + overridden. + metadata (google.cloud.aiplatform_v1.types.ExplanationMetadataOverride): + The metadata to be overridden. If not + specified, no metadata is overridden. + examples_override (google.cloud.aiplatform_v1.types.ExamplesOverride): + The example-based explanations parameter + overrides. + """ + + parameters = proto.Field( + proto.MESSAGE, + number=1, + message='ExplanationParameters', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='ExplanationMetadataOverride', + ) + examples_override = proto.Field( + proto.MESSAGE, + number=3, + message='ExamplesOverride', + ) + + +class ExplanationMetadataOverride(proto.Message): + r"""The + [ExplanationMetadata][google.cloud.aiplatform.v1.ExplanationMetadata] + entries that can be overridden at [online + explanation][google.cloud.aiplatform.v1.PredictionService.Explain] + time. + + Attributes: + inputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadataOverride.InputMetadataOverride]): + Required. Overrides the [input + metadata][google.cloud.aiplatform.v1.ExplanationMetadata.inputs] + of the features. The key is the name of the feature to be + overridden. The keys specified here must exist in the input + metadata to be overridden. If a feature is not specified + here, the corresponding feature's input metadata is not + overridden. 
+ """ + + class InputMetadataOverride(proto.Message): + r"""The [input + metadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata] + entries to be overridden. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. + + This overrides the ``input_baseline`` field of the + [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata] + object of the corresponding feature's input metadata. If + it's not specified, the original baselines are not + overridden. + """ + + input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=InputMetadataOverride, + ) + + +class ExamplesOverride(proto.Message): + r"""Overrides for example-based explanations. + + Attributes: + neighbor_count (int): + The number of neighbors to return. + crowding_count (int): + The number of neighbors to return that have + the same crowding tag. + restrictions (Sequence[google.cloud.aiplatform_v1.types.ExamplesRestrictionsNamespace]): + Restrict the resulting nearest neighbors to + respect these constraints. + return_embeddings (bool): + If true, return the embeddings instead of + neighbors. + data_format (google.cloud.aiplatform_v1.types.ExamplesOverride.DataFormat): + The format of the data being provided with + each call. 
+ """ + class DataFormat(proto.Enum): + r"""Data format enum.""" + DATA_FORMAT_UNSPECIFIED = 0 + INSTANCES = 1 + EMBEDDINGS = 2 + + neighbor_count = proto.Field( + proto.INT32, + number=1, + ) + crowding_count = proto.Field( + proto.INT32, + number=2, + ) + restrictions = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='ExamplesRestrictionsNamespace', + ) + return_embeddings = proto.Field( + proto.BOOL, + number=4, + ) + data_format = proto.Field( + proto.ENUM, + number=5, + enum=DataFormat, + ) + + +class ExamplesRestrictionsNamespace(proto.Message): + r"""Restrictions namespace for example-based explanations + overrides. + + Attributes: + namespace_name (str): + The namespace name. + allow (Sequence[str]): + The list of allowed tags. + deny (Sequence[str]): + The list of deny tags. + """ + + namespace_name = proto.Field( + proto.STRING, + number=1, + ) + allow = proto.RepeatedField( + proto.STRING, + number=2, + ) + deny = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py new file mode 100644 index 0000000000..2efa653075 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py @@ -0,0 +1,467 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ExplanationMetadata', + }, +) + + +class ExplanationMetadata(proto.Message): + r"""Metadata describing the Model's input and output for + explanation. + + Attributes: + inputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata]): + Required. Map from feature names to feature input metadata. + Keys are the name of the features. Values are the + specification of the feature. + + An empty InputMetadata is valid. It describes a text feature + which has the name specified as the key in + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. + The baseline of the empty feature is chosen by Vertex AI. + + For Vertex AI-provided Tensorflow images, the key can be any + friendly name of the feature. Once specified, + [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] + are keyed by this key (if not grouped with another feature). + + For custom images, the key must match with the key in + [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. + outputs (Mapping[str, google.cloud.aiplatform_v1.types.ExplanationMetadata.OutputMetadata]): + Required. Map from output names to output + metadata. + For Vertex AI-provided Tensorflow images, keys + can be any user defined string that consists of + any UTF-8 characters. + For custom images, keys are the name of the + output field in the prediction to be explained. + + Currently only one key is allowed. + feature_attributions_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing the format of the [feature + attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML tabular Models always have this field populated by + Vertex AI. 
Note: The URI given on output may be different, + including the URI scheme, than the one given on input. The + output URI will point to a location where the user only has + a read access. + latent_space_source (str): + Name of the source to generate embeddings for + example based explanations. + """ + + class InputMetadata(proto.Message): + r"""Metadata of the input of a feature. + + Fields other than + [InputMetadata.input_baselines][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.input_baselines] + are applicable only for Models that are using Vertex AI-provided + images for Tensorflow. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. + + If no baseline is specified, Vertex AI chooses the baseline + for this feature. If multiple baselines are specified, + Vertex AI returns the average attributions across them in + [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]. + + For Vertex AI-provided Tensorflow images (both 1.x and 2.x), + the shape of each baseline must match the shape of the input + tensor. If a scalar is provided, we broadcast to the same + shape as the input tensor. + + For custom images, the element of the baselines must be in + the same format as the feature's input in the + [instance][google.cloud.aiplatform.v1.ExplainRequest.instances][]. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + input_tensor_name (str): + Name of the input tensor for this feature. + Required and is only applicable to Vertex + AI-provided images for Tensorflow. 
+ encoding (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Encoding): + Defines how the feature is encoded into the + input tensor. Defaults to IDENTITY. + modality (str): + Modality of the feature. Valid values are: + numeric, image. Defaults to numeric. + feature_value_domain (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): + The domain details of the input feature + value. Like min/max, original mean or standard + deviation if normalized. + indices_tensor_name (str): + Specifies the index of the values of the input tensor. + Required when the input tensor is a sparse representation. + Refer to Tensorflow documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + dense_shape_tensor_name (str): + Specifies the shape of the values of the input if the input + is a sparse representation. Refer to Tensorflow + documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + index_feature_mapping (Sequence[str]): + A list of feature names for each index in the input tensor. + Required when the input + [InputMetadata.encoding][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.encoding] + is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. + encoded_tensor_name (str): + Encoded tensor is a transformation of the input tensor. Must + be provided if choosing [Integrated Gradients + attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution] + or [XRAI + attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution] + and the input tensor is not differentiable. + + An encoded tensor is generated if the input tensor is + encoded by a lookup table. + encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): + A list of baselines for the encoded tensor. + The shape of each baseline should match the + shape of the encoded tensor. 
If a scalar is + provided, Vertex AI broadcasts to the same shape + as the encoded tensor. + visualization (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization): + Visualization configurations for image + explanation. + group_name (str): + Name of the group that the input belongs to. Features with + the same group name will be treated as one feature when + computing attributions. Features grouped together can have + different shapes in value. If provided, there will be one + single attribution generated in + [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions], + keyed by the group name. + """ + class Encoding(proto.Enum): + r"""Defines how a feature is encoded. Defaults to IDENTITY.""" + ENCODING_UNSPECIFIED = 0 + IDENTITY = 1 + BAG_OF_FEATURES = 2 + BAG_OF_FEATURES_SPARSE = 3 + INDICATOR = 4 + COMBINED_EMBEDDING = 5 + CONCAT_EMBEDDING = 6 + + class FeatureValueDomain(proto.Message): + r"""Domain details of the input feature value. Provides numeric + information about the feature, such as its range (min, max). If the + feature has been pre-processed, for example with z-scoring, then it + provides information about how to recover the original feature. For + example, if the input feature is an image and it has been + pre-processed to obtain 0-mean and stddev = 1 values, then + original_mean, and original_stddev refer to the mean and stddev of + the original feature (e.g. image tensor) from which input feature + (with mean = 0 and stddev = 1) was obtained. + + Attributes: + min_value (float): + The minimum permissible value for this + feature. + max_value (float): + The maximum permissible value for this + feature. + original_mean (float): + If this input feature has been normalized to a mean value of + 0, the original_mean specifies the mean value of the domain + prior to normalization. 
+ original_stddev (float): + If this input feature has been normalized to a standard + deviation of 1.0, the original_stddev specifies the standard + deviation of the domain prior to normalization. + """ + + min_value = proto.Field( + proto.FLOAT, + number=1, + ) + max_value = proto.Field( + proto.FLOAT, + number=2, + ) + original_mean = proto.Field( + proto.FLOAT, + number=3, + ) + original_stddev = proto.Field( + proto.FLOAT, + number=4, + ) + + class Visualization(proto.Message): + r"""Visualization configurations for image explanation. + + Attributes: + type_ (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.Type): + Type of the image visualization. Only applicable to + [Integrated Gradients + attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution]. + OUTLINES shows regions of attribution, while PIXELS shows + per-pixel attribution. Defaults to OUTLINES. + polarity (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): + Whether to only highlight pixels with + positive contributions, negative or both. + Defaults to POSITIVE. + color_map (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): + The color scheme used for the highlighted areas. + + Defaults to PINK_GREEN for [Integrated Gradients + attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], + which shows positive attributions in green and negative in + pink. + + Defaults to VIRIDIS for [XRAI + attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], + which highlights the most influential regions in yellow and + the least influential in blue. + clip_percent_upperbound (float): + Excludes attributions above the specified percentile from + the highlighted areas. 
Using the clip_percent_upperbound and + clip_percent_lowerbound together can be useful for filtering + out noise and making it easier to see areas of strong + attribution. Defaults to 99.9. + clip_percent_lowerbound (float): + Excludes attributions below the specified + percentile, from the highlighted areas. Defaults + to 62. + overlay_type (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): + How the original image is displayed in the + visualization. Adjusting the overlay can help + increase visual clarity if the original image + makes it difficult to view the visualization. + Defaults to NONE. + """ + class Type(proto.Enum): + r"""Type of the image visualization. Only applicable to [Integrated + Gradients + attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution]. + """ + TYPE_UNSPECIFIED = 0 + PIXELS = 1 + OUTLINES = 2 + + class Polarity(proto.Enum): + r"""Whether to only highlight pixels with positive contributions, + negative or both. Defaults to POSITIVE. 
+ """ + POLARITY_UNSPECIFIED = 0 + POSITIVE = 1 + NEGATIVE = 2 + BOTH = 3 + + class ColorMap(proto.Enum): + r"""The color scheme used for highlighting areas.""" + COLOR_MAP_UNSPECIFIED = 0 + PINK_GREEN = 1 + VIRIDIS = 2 + RED = 3 + GREEN = 4 + RED_GREEN = 6 + PINK_WHITE_GREEN = 5 + + class OverlayType(proto.Enum): + r"""How the original image is displayed in the visualization.""" + OVERLAY_TYPE_UNSPECIFIED = 0 + NONE = 1 + ORIGINAL = 2 + GRAYSCALE = 3 + MASK_BLACK = 4 + + type_ = proto.Field( + proto.ENUM, + number=1, + enum='ExplanationMetadata.InputMetadata.Visualization.Type', + ) + polarity = proto.Field( + proto.ENUM, + number=2, + enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', + ) + color_map = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', + ) + clip_percent_upperbound = proto.Field( + proto.FLOAT, + number=4, + ) + clip_percent_lowerbound = proto.Field( + proto.FLOAT, + number=5, + ) + overlay_type = proto.Field( + proto.ENUM, + number=6, + enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', + ) + + input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + input_tensor_name = proto.Field( + proto.STRING, + number=2, + ) + encoding = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Encoding', + ) + modality = proto.Field( + proto.STRING, + number=4, + ) + feature_value_domain = proto.Field( + proto.MESSAGE, + number=5, + message='ExplanationMetadata.InputMetadata.FeatureValueDomain', + ) + indices_tensor_name = proto.Field( + proto.STRING, + number=6, + ) + dense_shape_tensor_name = proto.Field( + proto.STRING, + number=7, + ) + index_feature_mapping = proto.RepeatedField( + proto.STRING, + number=8, + ) + encoded_tensor_name = proto.Field( + proto.STRING, + number=9, + ) + encoded_baselines = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=struct_pb2.Value, + ) + visualization = 
proto.Field( + proto.MESSAGE, + number=11, + message='ExplanationMetadata.InputMetadata.Visualization', + ) + group_name = proto.Field( + proto.STRING, + number=12, + ) + + class OutputMetadata(proto.Message): + r"""Metadata of the prediction output to be explained. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index_display_name_mapping (google.protobuf.struct_pb2.Value): + Static mapping between the index and display name. + + Use this if the outputs are a deterministic n-dimensional + array, e.g. a list of scores of all the classes in a + pre-defined order for a multi-classification Model. It's not + feasible if the outputs are non-deterministic, e.g. the + Model produces top-k classes or sort the outputs by their + values. + + The shape of the value must be an n-dimensional array of + strings. The number of dimensions must match that of the + outputs to be explained. The + [Attribution.output_display_name][google.cloud.aiplatform.v1.Attribution.output_display_name] + is populated by locating in the mapping with + [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index]. + + This field is a member of `oneof`_ ``display_name_mapping``. + display_name_mapping_key (str): + Specify a field name in the prediction to look for the + display name. + + Use this if the prediction contains the display names for + the outputs. + + The display names in the prediction must have the same shape + of the outputs, so that it can be located by + [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] + for a specific output. + + This field is a member of `oneof`_ ``display_name_mapping``. 
+ output_tensor_name (str): + Name of the output tensor. Required and is + only applicable to Vertex AI provided images for + Tensorflow. + """ + + index_display_name_mapping = proto.Field( + proto.MESSAGE, + number=1, + oneof='display_name_mapping', + message=struct_pb2.Value, + ) + display_name_mapping_key = proto.Field( + proto.STRING, + number=2, + oneof='display_name_mapping', + ) + output_tensor_name = proto.Field( + proto.STRING, + number=3, + ) + + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=InputMetadata, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message=OutputMetadata, + ) + feature_attributions_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + latent_space_source = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py new file mode 100644 index 0000000000..ab799dd61a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import feature_monitoring_stats +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Feature', + }, +) + + +class Feature(proto.Message): + r"""Feature Metadata information that describes an attribute of + an entity type. For example, apple is an entity type, and color + is a feature that describes apple. + + Attributes: + name (str): + Immutable. Name of the Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + The last part feature is assigned by the client. The feature + can be up to 64 characters long and can consist only of + ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + digits 0-9 starting with a letter. The value will be unique + given an entity type. + description (str): + Description of the Feature. + value_type (google.cloud.aiplatform_v1.types.Feature.ValueType): + Required. Immutable. Type of Feature value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your Features. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one Feature + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. 
+ etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + disable_monitoring (bool): + Optional. If not set, use the monitoring_config defined for + the EntityType this Feature belongs to. Only Features with + type + ([Feature.ValueType][google.cloud.aiplatform.v1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 can enable monitoring. + + If set to true, all types of data monitoring are disabled + despite the config on EntityType. + monitoring_stats_anomalies (Sequence[google.cloud.aiplatform_v1.types.Feature.MonitoringStatsAnomaly]): + Output only. The list of historical stats and + anomalies with specified objectives. + """ + class ValueType(proto.Enum): + r"""An enum representing the value type of a feature.""" + VALUE_TYPE_UNSPECIFIED = 0 + BOOL = 1 + BOOL_ARRAY = 2 + DOUBLE = 3 + DOUBLE_ARRAY = 4 + INT64 = 9 + INT64_ARRAY = 10 + STRING = 11 + STRING_ARRAY = 12 + BYTES = 13 + + class MonitoringStatsAnomaly(proto.Message): + r"""A list of historical [Snapshot + Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] or [Import + Feature Analysis] + [FeaturestoreMonitoringConfig.ImportFeatureAnalysis] stats requested + by user, sorted by + [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1.FeatureStatsAnomaly.start_time] + descending. + + Attributes: + objective (google.cloud.aiplatform_v1.types.Feature.MonitoringStatsAnomaly.Objective): + Output only. The objective for each stats. + feature_stats_anomaly (google.cloud.aiplatform_v1.types.FeatureStatsAnomaly): + Output only. The stats and anomalies + generated at specific timestamp. + """ + class Objective(proto.Enum): + r"""If the objective in the request is both + Import Feature Analysis and Snapshot Analysis, this objective + could be one of them. Otherwise, this objective should be the + same as the objective in the request. 
+ """ + OBJECTIVE_UNSPECIFIED = 0 + IMPORT_FEATURE_ANALYSIS = 1 + SNAPSHOT_ANALYSIS = 2 + + objective = proto.Field( + proto.ENUM, + number=1, + enum='Feature.MonitoringStatsAnomaly.Objective', + ) + feature_stats_anomaly = proto.Field( + proto.MESSAGE, + number=2, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + value_type = proto.Field( + proto.ENUM, + number=3, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + disable_monitoring = proto.Field( + proto.BOOL, + number=12, + ) + monitoring_stats_anomalies = proto.RepeatedField( + proto.MESSAGE, + number=11, + message=MonitoringStatsAnomaly, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py new file mode 100644 index 0000000000..1e55ce1de1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'FeatureStatsAnomaly', + }, +) + + +class FeatureStatsAnomaly(proto.Message): + r"""Stats and Anomaly generated at specific timestamp for specific + Feature. The start_time and end_time are used to define the time + range of the dataset that current stats belongs to, e.g. prediction + traffic is bucketed into prediction datasets by time window. If the + Dataset is not defined by time window, start_time = end_time. + Timestamp of the stats and anomalies always refers to end_time. Raw + stats and anomalies are stored in stats_uri or anomaly_uri in the + tensorflow defined protos. Field data_stats contains almost + identical information with the raw stats in Vertex AI defined proto, + for UI to display. + + Attributes: + score (float): + Feature importance score, only populated when cross-feature + monitoring is enabled. For now only used to represent + feature attribution score within range [0, 1] for + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] + and + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. + stats_uri (str): + Path of the stats file for current feature values in Cloud + Storage bucket. Format: + gs:////stats. Example: + gs://monitoring_bucket/feature_name/stats. Stats are stored + as binary format with Protobuf message + `tensorflow.metadata.v0.FeatureNameStatistics `__. + anomaly_uri (str): + Path of the anomaly file for current feature values in Cloud + Storage bucket. Format: + gs:////anomalies. 
Example: + gs://monitoring_bucket/feature_name/anomalies. Stats are + stored as binary format with Protobuf message Anomalies are + stored as binary format with Protobuf message + [tensorflow.metadata.v0.AnomalyInfo] + (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). + distribution_deviation (float): + Deviation from the current stats to baseline + stats. 1. For categorical feature, the + distribution distance is calculated by + L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + anomaly_detection_threshold (float): + This is the threshold used when detecting anomalies. The + threshold can be changed by user, so this one might be + different from + [ThresholdConfig.value][google.cloud.aiplatform.v1.ThresholdConfig.value]. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The start timestamp of window where stats were generated. + For objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), start_time is only used + to indicate the monitoring intervals, so it always equals to + (end_time - monitoring_interval). + end_time (google.protobuf.timestamp_pb2.Timestamp): + The end timestamp of window where stats were generated. For + objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), end_time indicates the + timestamp of the data used to generate stats (e.g. timestamp + we take snapshots for feature values). 
+ """ + + score = proto.Field( + proto.DOUBLE, + number=1, + ) + stats_uri = proto.Field( + proto.STRING, + number=3, + ) + anomaly_uri = proto.Field( + proto.STRING, + number=4, + ) + distribution_deviation = proto.Field( + proto.DOUBLE, + number=5, + ) + anomaly_detection_threshold = proto.Field( + proto.DOUBLE, + number=9, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py new file mode 100644 index 0000000000..76967ac855 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'IdMatcher', + 'FeatureSelector', + }, +) + + +class IdMatcher(proto.Message): + r"""Matcher for Features of an EntityType by Feature ID. + + Attributes: + ids (Sequence[str]): + Required. 
The following are accepted as ``ids``: + + - A single-element list containing only ``*``, which + selects all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. + """ + + ids = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class FeatureSelector(proto.Message): + r"""Selector for Features of an EntityType. + + Attributes: + id_matcher (google.cloud.aiplatform_v1.types.IdMatcher): + Required. Matches Features based on ID. + """ + + id_matcher = proto.Field( + proto.MESSAGE, + number=1, + message='IdMatcher', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py new file mode 100644 index 0000000000..0eac43ab0c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Featurestore', + }, +) + + +class Featurestore(proto.Message): + r"""Vertex AI Feature Store provides a centralized repository for + organizing, storing, and serving ML features. The Featurestore + is a top-level container for your features and their values. + + Attributes: + name (str): + Output only. Name of the Featurestore. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your Featurestore. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one + Featurestore(System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + online_serving_config (google.cloud.aiplatform_v1.types.Featurestore.OnlineServingConfig): + Optional. Config for online storage + resources. If unset, the featurestore will not + have an online store and cannot be used for + online serving. + state (google.cloud.aiplatform_v1.types.Featurestore.State): + Output only. 
State of the featurestore. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Optional. Customer-managed encryption key + spec for data storage. If set, both of the + online and offline data storage will be secured + by this key. + """ + class State(proto.Enum): + r"""Possible states a featurestore can have.""" + STATE_UNSPECIFIED = 0 + STABLE = 1 + UPDATING = 2 + + class OnlineServingConfig(proto.Message): + r"""OnlineServingConfig specifies the details for provisioning + online serving resources. + + Attributes: + fixed_node_count (int): + The number of nodes for the online store. The + number of nodes doesn't scale automatically, but + you can manually update the number of nodes. If + set to 0, the featurestore will not have an + online store and cannot be used for online + serving. + scaling (google.cloud.aiplatform_v1.types.Featurestore.OnlineServingConfig.Scaling): + Online serving scaling configuration. Only one of + ``fixed_node_count`` and ``scaling`` can be set. Setting one + will reset the other. + """ + + class Scaling(proto.Message): + r"""Online serving scaling configuration. If min_node_count and + max_node_count are set to the same value, the cluster will be + configured with the fixed number of node (no auto-scaling). + + Attributes: + min_node_count (int): + Required. The minimum number of nodes to + scale down to. Must be greater than or equal to + 1. + max_node_count (int): + The maximum number of nodes to scale up to. Must be greater + than min_node_count, and less than or equal to 10 times of + 'min_node_count'. 
+ """ + + min_node_count = proto.Field( + proto.INT32, + number=1, + ) + max_node_count = proto.Field( + proto.INT32, + number=2, + ) + + fixed_node_count = proto.Field( + proto.INT32, + number=2, + ) + scaling = proto.Field( + proto.MESSAGE, + number=4, + message='Featurestore.OnlineServingConfig.Scaling', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + online_serving_config = proto.Field( + proto.MESSAGE, + number=7, + message=OnlineServingConfig, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=State, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=10, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_monitoring.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_monitoring.py new file mode 100644 index 0000000000..49c5fe541f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_monitoring.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'FeaturestoreMonitoringConfig', + }, +) + + +class FeaturestoreMonitoringConfig(proto.Message): + r"""Configuration of how features in Featurestore are monitored. + + Attributes: + snapshot_analysis (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): + The config for Snapshot Analysis Based + Feature Monitoring. + import_features_analysis (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis): + The config for ImportFeatures Analysis Based + Feature Monitoring. + numerical_threshold_config (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.ThresholdConfig): + Threshold for numerical features of anomaly detection. This + is shared by all objectives of Featurestore Monitoring for + numerical features (i.e. Features with type + ([Feature.ValueType][google.cloud.aiplatform.v1.Feature.ValueType]) + DOUBLE or INT64). + categorical_threshold_config (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.ThresholdConfig): + Threshold for categorical features of anomaly detection. + This is shared by all types of Featurestore Monitoring for + categorical features (i.e. Features with type + ([Feature.ValueType][google.cloud.aiplatform.v1.Feature.ValueType]) + BOOL or STRING). + """ + + class SnapshotAnalysis(proto.Message): + r"""Configuration of the Featurestore's Snapshot Analysis Based + Monitoring. This type of analysis generates statistics for each + Feature based on a snapshot of the latest feature value of each + entities every monitoring_interval. + + Attributes: + disabled (bool): + The monitoring schedule for snapshot analysis. For + EntityType-level config: unset / disabled = true indicates + disabled by default for Features under it; otherwise by + default enable snapshot analysis monitoring with + monitoring_interval for Features under it. 
Feature-level + config: disabled = true indicates disabled regardless of the + EntityType-level config; unset monitoring_interval indicates + going with EntityType-level config; otherwise run snapshot + analysis monitoring with monitoring_interval regardless of + the EntityType-level config. Explicitly Disable the snapshot + analysis based monitoring. + monitoring_interval_days (int): + Configuration of the snapshot analysis based monitoring + pipeline running interval. The value indicates number of + days. If both + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] + and + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][] + are set when creating/updating EntityTypes/Features, + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] + will be used. + staleness_days (int): + Customized export features time window for + snapshot analysis. Unit is one day. Default + value is 3 weeks. Minimum value is 1 day. + Maximum value is 4000 days. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + monitoring_interval_days = proto.Field( + proto.INT32, + number=3, + ) + staleness_days = proto.Field( + proto.INT32, + number=4, + ) + + class ImportFeaturesAnalysis(proto.Message): + r"""Configuration of the Featurestore's ImportFeature Analysis Based + Monitoring. This type of analysis generates statistics for values of + each Feature imported by every [ImportFeatureValues][] operation. + + Attributes: + state (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.State): + Whether to enable / disable / inherit + default behavior for import features analysis. 
+ anomaly_detection_baseline (google.cloud.aiplatform_v1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.Baseline): + The baseline used to do anomaly detection for + the statistics generated by import features + analysis. + """ + class State(proto.Enum): + r"""The state defines whether to enable ImportFeature analysis.""" + STATE_UNSPECIFIED = 0 + DEFAULT = 1 + ENABLED = 2 + DISABLED = 3 + + class Baseline(proto.Enum): + r"""Defines the baseline to do anomaly detection for feature values + imported by each [ImportFeatureValues][] operation. + """ + BASELINE_UNSPECIFIED = 0 + LATEST_STATS = 1 + MOST_RECENT_SNAPSHOT_STATS = 2 + PREVIOUS_IMPORT_FEATURES_STATS = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum='FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.State', + ) + anomaly_detection_baseline = proto.Field( + proto.ENUM, + number=2, + enum='FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.Baseline', + ) + + class ThresholdConfig(proto.Message): + r"""The config for Featurestore Monitoring threshold. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (float): + Specify a threshold value that can trigger + the alert. 1. For categorical feature, the + distribution distance is calculated by + L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. Each feature must have a non-zero + threshold if they need to be monitored. + Otherwise no alert will be triggered for that + feature. + + This field is a member of `oneof`_ ``threshold``. 
+ """ + + value = proto.Field( + proto.DOUBLE, + number=1, + oneof='threshold', + ) + + snapshot_analysis = proto.Field( + proto.MESSAGE, + number=1, + message=SnapshotAnalysis, + ) + import_features_analysis = proto.Field( + proto.MESSAGE, + number=2, + message=ImportFeaturesAnalysis, + ) + numerical_threshold_config = proto.Field( + proto.MESSAGE, + number=3, + message=ThresholdConfig, + ) + categorical_threshold_config = proto.Field( + proto.MESSAGE, + number=4, + message=ThresholdConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py new file mode 100644 index 0000000000..a324b52884 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1.types import types +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'FeatureValue', + 'FeatureValueList', + }, +) + + +class ReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + entity_type (str): + Required. The resource name of the EntityType for the entity + being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. + entity_id (str): + Required. ID for a specific entity. For example, for a + machine learning model predicting user clicks on a website, + an entity ID could be ``user_123``. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id = proto.Field( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class ReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + header (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.Header): + Response header. 
+ entity_view (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView): + Entity view with Feature values. This may be + the entity in the Featurestore if values for all + Features were requested, or a projection of the + entity in the Featurestore if values for only + some Features were requested. + """ + + class FeatureDescriptor(proto.Message): + r"""Metadata for requested Features. + + Attributes: + id (str): + Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + + class Header(proto.Message): + r"""Response header with metadata for the requested + [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1.ReadFeatureValuesRequest.entity_type] + and Features. + + Attributes: + entity_type (str): + The resource name of the EntityType from the + [ReadFeatureValuesRequest][google.cloud.aiplatform.v1.ReadFeatureValuesRequest]. + Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + feature_descriptors (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.FeatureDescriptor]): + List of Feature metadata corresponding to each piece of + [ReadFeatureValuesResponse.data][]. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + feature_descriptors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.FeatureDescriptor', + ) + + class EntityView(proto.Message): + r"""Entity view with Feature values. + + Attributes: + entity_id (str): + ID of the requested entity. + data (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView.Data]): + Each piece of data holds the k requested values for one + requested Feature. If no values for the requested Feature + exist, the corresponding cell will be empty. This has the + same size and is in the same order as the features from the + header + [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1.ReadFeatureValuesResponse.header]. 
+ """ + + class Data(proto.Message): + r"""Container to hold value(s), successive in time, for one + Feature from the request. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (google.cloud.aiplatform_v1.types.FeatureValue): + Feature value if a single value is requested. + + This field is a member of `oneof`_ ``data``. + values (google.cloud.aiplatform_v1.types.FeatureValueList): + Feature values list if values, successive in + time, are requested. If the requested number of + values is greater than the number of existing + Feature values, nonexistent values are omitted + instead of being returned as empty. + + This field is a member of `oneof`_ ``data``. + """ + + value = proto.Field( + proto.MESSAGE, + number=1, + oneof='data', + message='FeatureValue', + ) + values = proto.Field( + proto.MESSAGE, + number=2, + oneof='data', + message='FeatureValueList', + ) + + entity_id = proto.Field( + proto.STRING, + number=1, + ) + data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.EntityView.Data', + ) + + header = proto.Field( + proto.MESSAGE, + number=1, + message=Header, + ) + entity_view = proto.Field( + proto.MESSAGE, + number=2, + message=EntityView, + ) + + +class StreamingReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + + Attributes: + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. 
+ entity_ids (Sequence[str]): + Required. IDs of entities to read Feature values of. The + maximum number of IDs is 100. For example, for a machine + learning model predicting user clicks on a website, an + entity ID could be ``user_123``. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. Feature IDs will be + deduplicated. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class FeatureValue(proto.Message): + r"""Value for a feature. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bool_value (bool): + Bool type feature value. + + This field is a member of `oneof`_ ``value``. + double_value (float): + Double type feature value. + + This field is a member of `oneof`_ ``value``. + int64_value (int): + Int64 feature value. + + This field is a member of `oneof`_ ``value``. + string_value (str): + String feature value. + + This field is a member of `oneof`_ ``value``. + bool_array_value (google.cloud.aiplatform_v1.types.BoolArray): + A list of bool type feature value. + + This field is a member of `oneof`_ ``value``. + double_array_value (google.cloud.aiplatform_v1.types.DoubleArray): + A list of double type feature value. + + This field is a member of `oneof`_ ``value``. + int64_array_value (google.cloud.aiplatform_v1.types.Int64Array): + A list of int64 type feature value. + + This field is a member of `oneof`_ ``value``. 
+ string_array_value (google.cloud.aiplatform_v1.types.StringArray): + A list of string type feature value. + + This field is a member of `oneof`_ ``value``. + bytes_value (bytes): + Bytes feature value. + + This field is a member of `oneof`_ ``value``. + metadata (google.cloud.aiplatform_v1.types.FeatureValue.Metadata): + Metadata of feature value. + """ + + class Metadata(proto.Message): + r"""Metadata of feature value. + + Attributes: + generate_time (google.protobuf.timestamp_pb2.Timestamp): + Feature generation timestamp. Typically, it + is provided by user at feature ingestion time. + If not, feature store will use the system + timestamp when the data is ingested into feature + store. For streaming ingestion, the time, + aligned by days, must be no older than five + years (1825 days) and no later than one year + (366 days) in the future. + """ + + generate_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + + bool_value = proto.Field( + proto.BOOL, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + int64_value = proto.Field( + proto.INT64, + number=5, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=6, + oneof='value', + ) + bool_array_value = proto.Field( + proto.MESSAGE, + number=7, + oneof='value', + message=types.BoolArray, + ) + double_array_value = proto.Field( + proto.MESSAGE, + number=8, + oneof='value', + message=types.DoubleArray, + ) + int64_array_value = proto.Field( + proto.MESSAGE, + number=11, + oneof='value', + message=types.Int64Array, + ) + string_array_value = proto.Field( + proto.MESSAGE, + number=12, + oneof='value', + message=types.StringArray, + ) + bytes_value = proto.Field( + proto.BYTES, + number=13, + oneof='value', + ) + metadata = proto.Field( + proto.MESSAGE, + number=14, + message=Metadata, + ) + + +class FeatureValueList(proto.Message): + r"""Container for list of values. 
+ + Attributes: + values (Sequence[google.cloud.aiplatform_v1.types.FeatureValue]): + A list of feature values. All of them should + be the same data type. + """ + + values = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='FeatureValue', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py new file mode 100644 index 0000000000..95aeb2b424 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -0,0 +1,1665 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature as gca_feature +from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateFeaturestoreRequest', + 'GetFeaturestoreRequest', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'UpdateFeaturestoreRequest', + 'DeleteFeaturestoreRequest', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'BatchReadFeatureValuesRequest', + 'ExportFeatureValuesRequest', + 'DestinationFeatureSetting', + 'FeatureValueDestination', + 'ExportFeatureValuesResponse', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeRequest', + 'GetEntityTypeRequest', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'UpdateEntityTypeRequest', + 'DeleteEntityTypeRequest', + 'CreateFeatureRequest', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'GetFeatureRequest', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateFeatureRequest', + 'DeleteFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreOperationMetadata', + 'ImportFeatureValuesOperationMetadata', + 'ExportFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesOperationMetadata', + 'CreateEntityTypeOperationMetadata', + 'CreateFeatureOperationMetadata', + 'BatchCreateFeaturesOperationMetadata', + }, +) + + +class CreateFeaturestoreRequest(proto.Message): + r"""Request message for + 
[FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. + + Attributes: + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + featurestore (google.cloud.aiplatform_v1.types.Featurestore): + Required. The Featurestore to create. + featurestore_id (str): + Required. The ID to use for this Featurestore, which will + become the final component of the Featurestore's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within the project and location. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + featurestore = proto.Field( + proto.MESSAGE, + number=2, + message=gca_featurestore.Featurestore, + ) + featurestore_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore + resource. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListFeaturestoresRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the featurestores that match the filter expression. + The following fields are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. 
+ - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. + - ``labels``: Supports key-value equality and key presence. + + Examples: + + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" + set to "prod". + page_size (int): + The maximum number of Featurestores to + return. The service may return fewer than this + value. If unspecified, at most 100 Featurestores + will be returned. The maximum value is 100; any + value greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported Fields: + + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListFeaturestoresResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. + + Attributes: + featurestores (Sequence[google.cloud.aiplatform_v1.types.Featurestore]): + The Featurestores matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1.ListFeaturestoresRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + featurestores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. + + Attributes: + featurestore (google.cloud.aiplatform_v1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Featurestore resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. 
If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + """ + + featurestore = proto.Field( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + force (bool): + If set to true, any EntityTypes and Features + for this Featurestore will also be deleted. + (Otherwise, the request will only work if the + Featurestore has no EntityTypes.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class ImportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + avro_source (google.cloud.aiplatform_v1.types.AvroSource): + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): + + This field is a member of `oneof`_ ``source``. 
+ csv_source (google.cloud.aiplatform_v1.types.CsvSource): + + This field is a member of `oneof`_ ``source``. + feature_time_field (str): + Source column that holds the Feature + timestamp for all Feature values in each entity. + + This field is a member of `oneof`_ ``feature_time_source``. + feature_time (google.protobuf.timestamp_pb2.Timestamp): + Single Feature timestamp for all entities + being imported. The timestamp must not have + higher than millisecond precision. + + This field is a member of `oneof`_ ``feature_time_source``. + entity_type (str): + Required. The resource name of the EntityType grouping the + Features for which values are being imported. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + entity_id_field (str): + Source column that holds entity IDs. If not provided, entity + IDs are extracted from the column named ``entity_id``. + feature_specs (Sequence[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest.FeatureSpec]): + Required. Specifications defining which Feature values to + import from the entity. The request fails if no + feature_specs are provided, and having multiple + feature_specs for one Feature is not allowed. + disable_online_serving (bool): + If set, data will not be imported for online + serving. This is typically used for backfilling, + where Feature generation timestamps are not in + the timestamp range needed for online serving. + worker_count (int): + Specifies the number of workers that are used + to write data to the Featurestore. Consider the + online serving capacity that you require to + achieve the desired import throughput without + interfering with online serving. The value must + be positive, and less than or equal to 100. If + not set, defaults to using 1 worker. The low + count ensures minimal impact on online serving + performance. + disable_ingestion_analysis (bool): + If true, API doesn't start ingestion analysis + pipeline. 
+ """ + + class FeatureSpec(proto.Message): + r"""Defines the Feature value(s) to import. + + Attributes: + id (str): + Required. ID of the Feature to import values + of. This Feature must exist in the target + EntityType, or the request will fail. + source_field (str): + Source column to get the Feature values from. + If not set, uses the column with the same name + as the Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + source_field = proto.Field( + proto.STRING, + number=2, + ) + + avro_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.AvroSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + csv_source = proto.Field( + proto.MESSAGE, + number=4, + oneof='source', + message=io.CsvSource, + ) + feature_time_field = proto.Field( + proto.STRING, + number=6, + oneof='feature_time_source', + ) + feature_time = proto.Field( + proto.MESSAGE, + number=7, + oneof='feature_time_source', + message=timestamp_pb2.Timestamp, + ) + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id_field = proto.Field( + proto.STRING, + number=5, + ) + feature_specs = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=FeatureSpec, + ) + disable_online_serving = proto.Field( + proto.BOOL, + number=9, + ) + worker_count = proto.Field( + proto.INT32, + number=11, + ) + disable_ingestion_analysis = proto.Field( + proto.BOOL, + number=12, + ) + + +class ImportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. 
+ invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). + """ + + imported_entity_count = proto.Field( + proto.INT64, + number=1, + ) + imported_feature_value_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_row_count = proto.Field( + proto.INT64, + number=6, + ) + + +class BatchReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + csv_read_instances (google.cloud.aiplatform_v1.types.CsvSource): + Each read instance consists of exactly one read timestamp + and one or more entity IDs identifying entities of the + corresponding EntityTypes whose Features are requested. + + Each output instance contains Feature values of requested + entities concatenated together as of the read time. + + An example read instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. + + An example output instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. + + Timestamp in each read instance must be millisecond-aligned. + + ``csv_read_instances`` are read instances stored in a + plain-text CSV file. The header should be: + [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp + + The columns can be in any order. + + Values in the timestamp column must use the RFC 3339 format, + e.g. ``2012-07-30T10:43:17.123Z``. 
+ + This field is a member of `oneof`_ ``read_option``. + bigquery_read_instances (google.cloud.aiplatform_v1.types.BigQuerySource): + Similar to csv_read_instances, but from BigQuery source. + + This field is a member of `oneof`_ ``read_option``. + featurestore (str): + Required. The resource name of the Featurestore from which + to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + destination (google.cloud.aiplatform_v1.types.FeatureValueDestination): + Required. Specifies output location and + format. + pass_through_fields (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.PassThroughField]): + When not empty, the specified fields in the + \*_read_instances source will be joined as-is in the output, + in addition to those fields from the Featurestore Entity. + + For BigQuery source, the type of the pass-through values + will be automatically inferred. For CSV source, the + pass-through values will be passed as opaque bytes. + entity_type_specs (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): + Required. Specifies EntityType grouping Features to read + values of and settings. Each EntityType referenced in + [BatchReadFeatureValuesRequest.entity_type_specs] must have + a column specifying entity IDs in the EntityType in + [BatchReadFeatureValuesRequest.request][] . + """ + + class PassThroughField(proto.Message): + r"""Describe pass-through fields in read_instance source. + + Attributes: + field_name (str): + Required. The name of the field in the CSV header or the + name of the column in BigQuery table. The naming restriction + is the same as + [Feature.name][google.cloud.aiplatform.v1.Feature.name]. + """ + + field_name = proto.Field( + proto.STRING, + number=1, + ) + + class EntityTypeSpec(proto.Message): + r"""Selects Features of an EntityType to read values of and + specifies read settings. 
+ + Attributes: + entity_type_id (str): + Required. ID of the EntityType to select Features. The + EntityType id is the + [entity_type_id][google.cloud.aiplatform.v1.CreateEntityTypeRequest.entity_type_id] + specified during EntityType creation. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selectors choosing which Feature + values to read from the EntityType. + settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]): + Per-Feature settings for the batch read. + """ + + entity_type_id = proto.Field( + proto.STRING, + number=1, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=2, + message=gca_feature_selector.FeatureSelector, + ) + settings = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='DestinationFeatureSetting', + ) + + csv_read_instances = proto.Field( + proto.MESSAGE, + number=3, + oneof='read_option', + message=io.CsvSource, + ) + bigquery_read_instances = proto.Field( + proto.MESSAGE, + number=5, + oneof='read_option', + message=io.BigQuerySource, + ) + featurestore = proto.Field( + proto.STRING, + number=1, + ) + destination = proto.Field( + proto.MESSAGE, + number=4, + message='FeatureValueDestination', + ) + pass_through_fields = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=PassThroughField, + ) + entity_type_specs = proto.RepeatedField( + proto.MESSAGE, + number=7, + message=EntityTypeSpec, + ) + + +class ExportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + snapshot_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.SnapshotExport): + Exports the latest Feature values of all + entities of the EntityType within a time range. + + This field is a member of `oneof`_ ``mode``. + full_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.FullExport): + Exports all historical values of all entities + of the EntityType within a time range + + This field is a member of `oneof`_ ``mode``. + entity_type (str): + Required. The resource name of the EntityType from which to + export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + destination (google.cloud.aiplatform_v1.types.FeatureValueDestination): + Required. Specifies destination location and + format. + feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): + Required. Selects Features to export values + of. + settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]): + Per-Feature export settings. + """ + + class SnapshotExport(proto.Message): + r"""Describes exporting the latest Feature values of all entities of the + EntityType between [start_time, snapshot_time]. + + Attributes: + snapshot_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. 
+ """ + + snapshot_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + class FullExport(proto.Message): + r"""Describes exporting all historical Feature values of all entities of + the EntityType between [start_time, end_time]. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + + snapshot_export = proto.Field( + proto.MESSAGE, + number=3, + oneof='mode', + message=SnapshotExport, + ) + full_export = proto.Field( + proto.MESSAGE, + number=7, + oneof='mode', + message=FullExport, + ) + entity_type = proto.Field( + proto.STRING, + number=1, + ) + destination = proto.Field( + proto.MESSAGE, + number=4, + message='FeatureValueDestination', + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=5, + message=gca_feature_selector.FeatureSelector, + ) + settings = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='DestinationFeatureSetting', + ) + + +class DestinationFeatureSetting(proto.Message): + r""" + + Attributes: + feature_id (str): + Required. The ID of the Feature to apply the + setting to. + destination_field (str): + Specify the field name in the export + destination. If not specified, Feature ID is + used. 
+ """ + + feature_id = proto.Field( + proto.STRING, + number=1, + ) + destination_field = proto.Field( + proto.STRING, + number=2, + ) + + +class FeatureValueDestination(proto.Message): + r"""A destination location for Feature values and format. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Output in BigQuery format. + [BigQueryDestination.output_uri][google.cloud.aiplatform.v1.BigQueryDestination.output_uri] + in + [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1.FeatureValueDestination.bigquery_destination] + must refer to a table. + + This field is a member of `oneof`_ ``destination``. + tfrecord_destination (google.cloud.aiplatform_v1.types.TFRecordDestination): + Output in TFRecord format. + + Below are the mapping from Feature value type in + Featurestore to Feature value type in TFRecord: + + :: + + Value type in Featurestore | Value type in TFRecord + DOUBLE, DOUBLE_ARRAY | FLOAT_LIST + INT64, INT64_ARRAY | INT64_LIST + STRING, STRING_ARRAY, BYTES | BYTES_LIST + true -> byte_string("true"), false -> byte_string("false") + BOOL, BOOL_ARRAY (true, false) | BYTES_LIST + + This field is a member of `oneof`_ ``destination``. + csv_destination (google.cloud.aiplatform_v1.types.CsvDestination): + Output in CSV format. Array Feature value + types are not allowed in CSV format. + + This field is a member of `oneof`_ ``destination``. 
+ """ + + bigquery_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.BigQueryDestination, + ) + tfrecord_destination = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.TFRecordDestination, + ) + csv_destination = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.CsvDestination, + ) + + +class ExportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + + """ + + +class BatchReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. + + """ + + +class CreateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to create + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + entity_type (google.cloud.aiplatform_v1.types.EntityType): + The EntityType to create. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within a featurestore. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + entity_type = proto.Field( + proto.MESSAGE, + number=2, + message=gca_entity_type.EntityType, + ) + entity_type_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. 
+ + Attributes: + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEntityTypesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + filter (str): + Lists the EntityTypes that match the filter expression. The + following filters are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. + page_size (int): + The maximum number of EntityTypes to return. + The service may return fewer than this value. If + unspecified, at most 1000 EntityTypes will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other parameters provided to + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. + + Supported fields: + + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListEntityTypesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. + + Attributes: + entity_types (Sequence[google.cloud.aiplatform_v1.types.EntityType]): + The EntityTypes matching the request. + next_page_token (str): + A token, which can be sent as + [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1.ListEntityTypesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + entity_types = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_entity_type.EntityType, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. + + Attributes: + entity_type (google.cloud.aiplatform_v1.types.EntityType): + Required. 
The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the EntityType resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + """ + + entity_type = proto.Field( + proto.MESSAGE, + number=1, + message=gca_entity_type.EntityType, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEntityTypeRequest(proto.Message): + r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. + + Attributes: + name (str): + Required. The name of the EntityType to be deleted. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + force (bool): + If set to true, any Features for this + EntityType will also be deleted. (Otherwise, the + request will only work if the EntityType has no + Features.) 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class CreateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create a + Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature to create. + feature_id (str): + Required. The ID to use for the Feature, which will become + the final component of the Feature's resource name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within an EntityType. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + feature = proto.Field( + proto.MESSAGE, + number=2, + message=gca_feature.Feature, + ) + feature_id = proto.Field( + proto.STRING, + number=3, + ) + + +class BatchCreateFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create the + batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same parent + EntityType. The ``parent`` field in each child request + message can be omitted. If ``parent`` is set in a child + request, then the value must match the ``parent`` value in + this request message. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateFeatureRequest', + ) + + +class BatchCreateFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features created. + """ + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + + +class GetFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. + + Attributes: + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + filter (str): + Lists the Features that match the filter expression. The + following filters are supported: + + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 1000 Features will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``feature_id`` + - ``value_type`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + latest_stats_count (int): + If set, return the most recent + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count] + of stats for each Feature in response. Valid value is [0, + 10]. If number of stats exists < + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count], + return all existing stats. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + latest_stats_count = proto.Field( + proto.INT32, + number=7, + ) + + +class ListFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1.ListFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class SearchFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Attributes: + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. Field-restricted + queries and filters can be combined using ``AND`` to form a + conjunction. + + A field query is in the form FIELD:QUERY. This implicitly + checks if QUERY exists as a substring within Feature's + FIELD. The QUERY and the FIELD are converted to a sequence + of words (i.e. tokens) for comparison. 
This is done by: + + - Removing leading/trailing whitespace and tokenizing the + search value. Characters that are not one of alphanumeric + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double quotation + marks ("). With phrases, the order of the words is + important. Words in the phrase must be matching in order and + consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. + + Besides field queries, the following exact-match filters are + supported. The exact-match filters do not support wildcards. + Unlike field-restricted queries, exact-match filters are + case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 100 Features will be + returned. The maximum value is 100; any value + greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures], + except ``page_size``, must match the call that provided the + page token. + """ + + location = proto.Field( + proto.STRING, + number=1, + ) + query = proto.Field( + proto.STRING, + number=3, + ) + page_size = proto.Field( + proto.INT32, + number=4, + ) + page_token = proto.Field( + proto.STRING, + number=5, + ) + + +class SearchFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1.types.Feature]): + The Features matching the request. + + Fields returned: + + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` + next_page_token (str): + A token, which can be sent as + [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1.SearchFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. + + Attributes: + feature (google.cloud.aiplatform_v1.types.Feature): + Required. The Feature's ``name`` field is used to identify + the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Features resource by the update. The fields specified + in the update_mask are relative to the resource, not the + full request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be overwritten. + Set the update_mask to ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + """ + + feature = proto.Field( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. + + Attributes: + name (str): + Required. The name of the Features to be deleted. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform create Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform update Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ImportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that perform import Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore import + Feature values. + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. + invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + imported_entity_count = proto.Field( + proto.INT64, + number=2, + ) + imported_feature_value_count = proto.Field( + proto.INT64, + number=3, + ) + invalid_row_count = proto.Field( + proto.INT64, + number=6, + ) + + +class ExportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that exports Features values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore export + Feature values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class BatchReadFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that batch reads Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Featurestore batch + read Features values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateEntityTypeOperationMetadata(proto.Message): + r"""Details of operations that perform create EntityType. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for EntityType. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateFeatureOperationMetadata(proto.Message): + r"""Details of operations that perform create Feature. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Feature. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class BatchCreateFeaturesOperationMetadata(proto.Message): + r"""Details of operations that perform batch create Features. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py new file mode 100644 index 0000000000..a2b0916381 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import study +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'HyperparameterTuningJob', + }, +) + + +class HyperparameterTuningJob(proto.Message): + r"""Represents a HyperparameterTuningJob. A + HyperparameterTuningJob has a Study specification and multiple + CustomJobs with identical CustomJob specification. + + Attributes: + name (str): + Output only. Resource name of the + HyperparameterTuningJob. + display_name (str): + Required. The display name of the + HyperparameterTuningJob. The name can be up to + 128 characters long and can be consist of any + UTF-8 characters. + study_spec (google.cloud.aiplatform_v1.types.StudySpec): + Required. Study configuration of the + HyperparameterTuningJob. + max_trial_count (int): + Required. The desired total number of Trials. + parallel_trial_count (int): + Required. The desired number of Trials to run + in parallel. + max_failed_trial_count (int): + The number of failed Trials that need to be + seen before failing the HyperparameterTuningJob. + If set to 0, Vertex AI decides how many Trials + must fail before the whole job fails. + trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): + Required. The spec of a trial job. The same + spec applies to the CustomJobs created in all + the trials. + trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): + Output only. Trials of the + HyperparameterTuningJob. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Time when the + HyperparameterTuningJob was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the HyperparameterTuningJob for the + first time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the HyperparameterTuningJob entered + any of the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + HyperparameterTuningJob was most recently + updated. + error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize HyperparameterTuningJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key options for a + HyperparameterTuningJob. If this is set, then + all resources created by the + HyperparameterTuningJob will be encrypted with + the provided encryption key. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=4, + message=study.StudySpec, + ) + max_trial_count = proto.Field( + proto.INT32, + number=5, + ) + parallel_trial_count = proto.Field( + proto.INT32, + number=6, + ) + max_failed_trial_count = proto.Field( + proto.INT32, + number=7, + ) + trial_job_spec = proto.Field( + proto.MESSAGE, + number=8, + message=custom_job.CustomJobSpec, + ) + trials = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=study.Trial, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=15, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=17, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py new file mode 100644 index 0000000000..9c213b84ce --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import deployed_index_ref +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Index', + }, +) + + +class Index(proto.Message): + r"""A representation of a collection of database items organized + in a way that allows for approximate nearest neighbor (a.k.a + ANN) algorithms search. + + Attributes: + name (str): + Output only. The resource name of the Index. + display_name (str): + Required. The display name of the Index. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Index. + metadata_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing additional information about the Index, + that is specific to it. Unset if the Index does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + An additional information about the Index; the schema of the + metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1.Index.metadata_schema_uri]. 
+ deployed_indexes (Sequence[google.cloud.aiplatform_v1.types.DeployedIndexRef]): + Output only. The pointers to DeployedIndexes + created from this Index. An Index can be only + deleted if all its DeployedIndexes had been + undeployed first. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Indexes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was most recently + updated. This also includes any update to the contents of + the Index. Note that Operations working on this Index may + have their + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1.GenericOperationMetadata.update_time] + a little after the value of this timestamp, yet that does + not mean their results are not already reflected in the + Index. Result of any successfully completed Operation on the + Index is reflected in it. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, + number=7, + message=deployed_index_ref.DeployedIndexRef, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py new file mode 100644 index 0000000000..c42eb5872f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'IndexEndpoint', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexPrivateEndpoints', + }, +) + + +class IndexEndpoint(proto.Message): + r"""Indexes are deployed into it. An IndexEndpoint can have + multiple DeployedIndexes. + + Attributes: + name (str): + Output only. The resource name of the + IndexEndpoint. + display_name (str): + Required. The display name of the + IndexEndpoint. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the IndexEndpoint. + deployed_indexes (Sequence[google.cloud.aiplatform_v1.types.DeployedIndex]): + Output only. The indexes deployed in this + endpoint. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your IndexEndpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was last updated. This timestamp + is not updated when the endpoint's + DeployedIndexes are updated, e.g. due to updates + of the original Indexes they are the deployments + of. + network (str): + Optional. The full name of the Google Compute Engine + `network `__ + to which the IndexEndpoint should be peered. 
+ + Private services access must already be configured for the + network. If left unspecified, the Endpoint is not peered + with any network. + + [network][google.cloud.aiplatform.v1.IndexEndpoint.network] + and + [private_service_connect_config][google.cloud.aiplatform.v1.IndexEndpoint.private_service_connect_config] + are mutually exclusive. + + `Format `__: + projects/{project}/global/networks/{network}. Where + {project} is a project number, as in '12345', and {network} + is network name. + enable_private_service_connect (bool): + Optional. Deprecated: If true, expose the IndexEndpoint via + private service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], + can be set. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='DeployedIndex', + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + create_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + network = proto.Field( + proto.STRING, + number=9, + ) + enable_private_service_connect = proto.Field( + proto.BOOL, + number=10, + ) + + +class DeployedIndex(proto.Message): + r"""A deployment of an Index. IndexEndpoints contain one or more + DeployedIndexes. + + Attributes: + id (str): + Required. The user specified ID of the + DeployedIndex. The ID can be up to 128 + characters long and must start with a letter and + only contain letters, numbers, and underscores. + The ID must be unique within the project it is + created in. 
+ index (str): + Required. The name of the Index this is the + deployment of. We may refer to this Index as the + DeployedIndex's "original" Index. + display_name (str): + The display name of the DeployedIndex. If not provided upon + creation, the Index's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedIndex + was created. + private_endpoints (google.cloud.aiplatform_v1.types.IndexPrivateEndpoints): + Output only. Provides paths for users to send requests + directly to the deployed index services running on Cloud via + private services access. This field is populated if + [network][google.cloud.aiplatform.v1.IndexEndpoint.network] + is configured. + index_sync_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The DeployedIndex may depend on various data on + its original Index. Additionally when certain changes to the + original Index are being done (e.g. when what the Index + contains is being changed) the DeployedIndex may be + asynchronously updated in the background to reflect this + changes. If this timestamp's value is at least the + [Index.update_time][google.cloud.aiplatform.v1.Index.update_time] + of the original Index, it means that this DeployedIndex and + the original Index are in sync. If this timestamp is older, + then to see which updates this DeployedIndex already + contains (and which not), one must + [list][Operations.ListOperations] [Operations][Operation] + [working][Operation.name] on the original Index. Only the + successfully completed Operations with + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1.GenericOperationMetadata.update_time] + equal or before this sync time are contained in this + DeployedIndex. + automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources): + Optional. 
A description of resources that the DeployedIndex
+ uses, which to large degree are decided by Vertex AI, and
+ optionally allows only a modest additional configuration. If
+ min_replica_count is not set, the default value is 2 (we
+ don't provide SLA when min_replica_count=1). If
+ max_replica_count is not set, the default value is
+ min_replica_count. The max allowed replica count is 1000.
+ dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources):
+ Optional. A description of resources that are dedicated to
+ the DeployedIndex, and that need a higher degree of manual
+ configuration. If min_replica_count is not set, the default
+ value is 2 (we don't provide SLA when min_replica_count=1).
+ If max_replica_count is not set, the default value is
+ min_replica_count. The max allowed replica count is 1000.
+
+ Available machine types: n1-standard-16 n1-standard-32
+ enable_access_logging (bool):
+ Optional. If true, private endpoint's access
+ logs are sent to StackDriver Logging.
+ These logs are like standard server access logs,
+ containing information like timestamp and
+ latency for each MatchRequest.
+ Note that Stackdriver logs may incur a cost,
+ especially if the deployed index receives a high
+ queries per second rate (QPS). Estimate your
+ costs before enabling this option.
+ deployed_index_auth_config (google.cloud.aiplatform_v1.types.DeployedIndexAuthConfig):
+ Optional. If set, the authentication is
+ enabled for the private endpoint.
+ reserved_ip_ranges (Sequence[str]):
+ Optional. A list of reserved ip ranges under
+ the VPC network that can be used for this
+ DeployedIndex.
+ If set, we will deploy the index within the
+ provided ip ranges. Otherwise, the index might
+ be deployed to any ip ranges under the provided
+ VPC network.
+
+ The value should be the name of the address
+ (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
+ Example: 'vertex-ai-ip-range'.
+ deployment_group (str):
+ Optional. 
The deployment group can be no longer than 64 + characters (eg: 'test', 'prod'). If not set, we will use the + 'default' deployment group. + + Creating ``deployment_groups`` with ``reserved_ip_ranges`` + is a recommended practice when the peered network has + multiple peering ranges. This creates your deployments from + predictable IP spaces for easier traffic administration. + Also, one deployment_group (except 'default') can only be + used with the same reserved_ip_ranges which means if the + deployment_group has been used with reserved_ip_ranges: [a, + b, c], using it with [a, b] or [d, e] is disallowed. + + Note: we only support up to 5 deployment groups(not + including 'default'). + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + index = proto.Field( + proto.STRING, + number=2, + ) + display_name = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + private_endpoints = proto.Field( + proto.MESSAGE, + number=5, + message='IndexPrivateEndpoints', + ) + index_sync_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + automatic_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.AutomaticResources, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=16, + message=machine_resources.DedicatedResources, + ) + enable_access_logging = proto.Field( + proto.BOOL, + number=8, + ) + deployed_index_auth_config = proto.Field( + proto.MESSAGE, + number=9, + message='DeployedIndexAuthConfig', + ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=10, + ) + deployment_group = proto.Field( + proto.STRING, + number=11, + ) + + +class DeployedIndexAuthConfig(proto.Message): + r"""Used to set up the auth on the DeployedIndex's private + endpoint. 
+ + Attributes: + auth_provider (google.cloud.aiplatform_v1.types.DeployedIndexAuthConfig.AuthProvider): + Defines the authentication provider that the + DeployedIndex uses. + """ + + class AuthProvider(proto.Message): + r"""Configuration for an authentication provider, including support for + `JSON Web Token + (JWT) `__. + + Attributes: + audiences (Sequence[str]): + The list of JWT + `audiences `__. + that are allowed to access. A JWT containing any of these + audiences will be accepted. + allowed_issuers (Sequence[str]): + A list of allowed JWT issuers. Each entry must be a valid + Google service account, in the following format: + + ``service-account-name@project-id.iam.gserviceaccount.com`` + """ + + audiences = proto.RepeatedField( + proto.STRING, + number=1, + ) + allowed_issuers = proto.RepeatedField( + proto.STRING, + number=2, + ) + + auth_provider = proto.Field( + proto.MESSAGE, + number=1, + message=AuthProvider, + ) + + +class IndexPrivateEndpoints(proto.Message): + r"""IndexPrivateEndpoints proto is used to provide paths for users to + send requests via private endpoints (e.g. private service access, + private service connect). To send request via private service + access, use match_grpc_address. To send request via private service + connect, use service_attachment. + + Attributes: + match_grpc_address (str): + Output only. The ip address used to send + match gRPC requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
+ """ + + match_grpc_address = proto.Field( + proto.STRING, + number=1, + ) + service_attachment = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py new file mode 100644 index 0000000000..f03adf2a44 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateIndexEndpointRequest', + 'CreateIndexEndpointOperationMetadata', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'UpdateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'DeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UndeployIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'MutateDeployedIndexOperationMetadata', + }, +) + + +class CreateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): + Required. The IndexEndpoint to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index_endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.IndexEndpoint, + ) + + +class CreateIndexEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListIndexEndpointsRequest(proto.Message): + r"""Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``index_endpoint`` supports = and !=. ``index_endpoint`` + represents the IndexEndpoint ID, ie. the last segment of + the IndexEndpoint's + [resourcename][google.cloud.aiplatform.v1.IndexEndpoint.name]. + - ``display_name`` supports =, != and regex() (uses + `re2 `__ + syntax) + - ``labels`` supports general map functions that is: + ``labels.key=value`` - key:value equality + ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a + key"`. + + Some examples: + + - ``index_endpoint="1"`` + - ``display_name="myDisplayName"`` + - \`regex(display_name, "^A") -> The display name starts + with an A. + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. 
Typically obtained + via + [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListIndexEndpointsResponse.next_page_token] + of the previous + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListIndexEndpointsResponse(proto.Message): + r"""Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. + + Attributes: + index_endpoints (Sequence[google.cloud.aiplatform_v1.types.IndexEndpoint]): + List of IndexEndpoints in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListIndexEndpointsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + index_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.IndexEndpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. + + Attributes: + index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): + Required. The IndexEndpoint which replaces + the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. 
See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + index_endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.IndexEndpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be created + within the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + The DeployedIndex that had been deployed in + the IndexEndpoint. 
+ """ + + deployed_index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +class UndeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource from which + to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index_id (str): + Required. The ID of the DeployedIndex to be + undeployed from the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +class UndeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + + """ + + +class UndeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class MutateDeployedIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + The DeployedIndex that had been updated in + the IndexEndpoint. + """ + + deployed_index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. 
+ deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py new file mode 100644 index 0000000000..90a9714cb8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import index as gca_index +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateIndexRequest', + 'CreateIndexOperationMetadata', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'UpdateIndexRequest', + 'UpdateIndexOperationMetadata', + 'DeleteIndexRequest', + 'NearestNeighborSearchOperationMetadata', + }, +) + + +class CreateIndexRequest(proto.Message): + r"""Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. + + Attributes: + parent (str): + Required. 
The resource name of the Location to create the + Index in. Format: + ``projects/{project}/locations/{location}`` + index (google.cloud.aiplatform_v1.types.Index): + Required. The Index to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index.Index, + ) + + +class CreateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class GetIndexRequest(proto.Message): + r"""Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] + + Attributes: + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListIndexesRequest(proto.Message): + r"""Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Indexes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
Typically obtained via + [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1.ListIndexesResponse.next_page_token] + of the previous + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListIndexesResponse(proto.Message): + r"""Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. + + Attributes: + indexes (Sequence[google.cloud.aiplatform_v1.types.Index]): + List of indexes in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexesRequest.page_token][google.cloud.aiplatform.v1.ListIndexesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + indexes = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateIndexRequest(proto.Message): + r"""Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. + + Attributes: + index (google.cloud.aiplatform_v1.types.Index): + Required. The Index which updates the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
+ """ + + index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class DeleteIndexRequest(proto.Message): + r"""Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. + + Attributes: + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class NearestNeighborSearchOperationMetadata(proto.Message): + r"""Runtime operation metadata with regard to Matching Engine + Index. + + Attributes: + content_validation_stats (Sequence[google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): + The validation stats of the content (per file) to be + inserted or updated on the Matching Engine Index resource. + Populated if contentsDeltaUri is provided as part of + [Index.metadata][google.cloud.aiplatform.v1.Index.metadata]. 
+ Please note that, currently for those files that are broken
+ or have an unsupported file format, we will not have the stats
+ for those files.
+ data_bytes_count (int):
+ The ingested data size in bytes.
+ """
+
+ class RecordError(proto.Message):
+ r"""
+
+ Attributes:
+ error_type (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType):
+ The error type of this record.
+ error_message (str):
+ A human-readable message that is shown to the user to help
+ them fix the error. Note that this message may change from
+ time to time, your code should check against error_type as
+ the source of truth.
+ source_gcs_uri (str):
+ Cloud Storage URI pointing to the original
+ file in user's bucket.
+ embedding_id (str):
+ Empty if the embedding id failed to parse.
+ raw_record (str):
+ The original content of this record.
+ """
+ class RecordErrorType(proto.Enum):
+ r""""""
+ ERROR_TYPE_UNSPECIFIED = 0
+ EMPTY_LINE = 1
+ INVALID_JSON_SYNTAX = 2
+ INVALID_CSV_SYNTAX = 3
+ INVALID_AVRO_SYNTAX = 4
+ INVALID_EMBEDDING_ID = 5
+ EMBEDDING_SIZE_MISMATCH = 6
+ NAMESPACE_MISSING = 7
+
+ error_type = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType',
+ )
+ error_message = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_gcs_uri = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ embedding_id = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ raw_record = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+
+ class ContentValidationStats(proto.Message):
+ r"""
+
+ Attributes:
+ source_gcs_uri (str):
+ Cloud Storage URI pointing to the original
+ file in user's bucket.
+ valid_record_count (int):
+ Number of records in this file that were
+ successfully processed.
+ invalid_record_count (int):
+ Number of records in this file we skipped due
+ to validation errors. 
+ partial_errors (Sequence[google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.RecordError]): + The detail information of the partial + failures encountered for those invalid records + that couldn't be parsed. Up to 50 partial errors + will be reported. + """ + + source_gcs_uri = proto.Field( + proto.STRING, + number=1, + ) + valid_record_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_record_count = proto.Field( + proto.INT64, + number=3, + ) + partial_errors = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='NearestNeighborSearchOperationMetadata.RecordError', + ) + + content_validation_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=ContentValidationStats, + ) + data_bytes_count = proto.Field( + proto.INT64, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py new file mode 100644 index 0000000000..19163ec837 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'AvroSource', + 'CsvSource', + 'GcsSource', + 'GcsDestination', + 'BigQuerySource', + 'BigQueryDestination', + 'CsvDestination', + 'TFRecordDestination', + 'ContainerRegistryDestination', + }, +) + + +class AvroSource(proto.Message): + r"""The storage details for Avro input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class CsvSource(proto.Message): + r"""The storage details for CSV input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + + Attributes: + uris (Sequence[str]): + Required. Google Cloud Storage URI(-s) to the + input file(s). May contain wildcards. For more + information on wildcards, see + https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + """ + + uris = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output + directory. If the uri doesn't end with '/', a + '/' will be automatically appended. The + directory is created if it doesn't exist. + """ + + output_uri_prefix = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQuerySource(proto.Message): + r"""The BigQuery location for the input content. + + Attributes: + input_uri (str): + Required. BigQuery URI to a table, up to 2000 characters + long. Accepted forms: + + - BigQuery path. 
For example: + ``bq://projectId.bqDatasetId.bqTableId``. + """ + + input_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQueryDestination(proto.Message): + r"""The BigQuery location for the output content. + + Attributes: + output_uri (str): + Required. BigQuery URI to a project or table, up to 2000 + characters long. + + When only the project is specified, the Dataset and Table is + created. When the full table reference is specified, the + Dataset must exist and table must not exist. + + Accepted forms: + + - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId`` or + ``bq://projectId.bqDatasetId.bqTableId``. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class CsvDestination(proto.Message): + r"""The storage details for CSV output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class TFRecordDestination(proto.Message): + r"""The storage details for TFRecord output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class ContainerRegistryDestination(proto.Message): + r"""The Container Registry location for the container image. + + Attributes: + output_uri (str): + Required. Container Registry URI of a container image. Only + Google Container Registry and Artifact Registry are + supported now. Accepted forms: + + - Google Container Registry path. For example: + ``gcr.io/projectId/imageName:tag``. + + - Artifact Registry path. For example: + ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. + + If a tag is not specified, "latest" will be used as the + default tag. 
+ """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py new file mode 100644 index 0000000000..be3f561727 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py @@ -0,0 +1,1093 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateCustomJobRequest', + 'GetCustomJobRequest', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'DeleteCustomJobRequest', + 'CancelCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'GetDataLabelingJobRequest', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'DeleteDataLabelingJobRequest', + 'CancelDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'DeleteHyperparameterTuningJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'GetBatchPredictionJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'DeleteBatchPredictionJobRequest', + 'CancelBatchPredictionJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'GetModelDeploymentMonitoringJobRequest', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'UpdateModelDeploymentMonitoringJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', 
+ 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + }, +) + + +class CreateCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + CustomJob in. Format: + ``projects/{project}/locations/{location}`` + custom_job (google.cloud.aiplatform_v1.types.CustomJob): + Required. The CustomJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + custom_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_custom_job.CustomJob, + ) + + +class GetCustomJobRequest(proto.Message): + r"""Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListCustomJobsRequest(proto.Message): + r"""Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
Typically obtained via + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token] + of the previous + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListCustomJobsResponse(proto.Message): + r"""Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Attributes: + custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]): + List of CustomJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1.ListCustomJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + custom_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_custom_job.CustomJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteCustomJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource to be deleted. + Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + + Attributes: + name (str): + Required. 
The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): + Required. The DataLabelingJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + data_labeling_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_data_labeling_job.DataLabelingJob, + ) + + +class GetDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDataLabelingJobsRequest(proto.Message): + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
+ read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. FieldMask represents a + set of symbolic field paths. For example, the mask can be + ``paths: "name"``. The "name" here is a field in + DataLabelingJob. If this field is not set, all fields of the + DataLabelingJob are returned. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order by default. Use ``desc`` after a field name + for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataLabelingJobsResponse(proto.Message): + r"""Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Attributes: + data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]): + A list of DataLabelingJobs that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_labeling_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_data_labeling_job.DataLabelingJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + hyperparameter_tuning_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + + +class GetHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListHyperparameterTuningJobsRequest(proto.Message): + r"""Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + + Attributes: + parent (str): + Required. 
The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsResponse.next_page_token] + of the previous + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListHyperparameterTuningJobsResponse(proto.Message): + r"""Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Attributes: + hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]): + List of HyperparameterTuningJobs in the requested page. + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials] + of the jobs will be not be returned. + next_page_token (str): + A token to retrieve the next page of results. 
Pass to + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + hyperparameter_tuning_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource + to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob to cancel. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): + Required. The BatchPredictionJob to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + batch_prediction_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + + +class GetBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListBatchPredictionJobsRequest(proto.Message): + r"""Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + - ``model_display_name`` supports = and != + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsResponse.next_page_token] + of the previous + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListBatchPredictionJobsResponse(proto.Message): + r"""Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Attributes: + batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]): + List of BatchPredictionJobs in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + batch_prediction_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob to cancel. 
+ Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): + Required. The ModelDeploymentMonitoringJob to + create + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): + r"""Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + deployed_model_id (str): + Required. The DeployedModel ID of the + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + feature_display_name (str): + The feature display name. If specified, only return the + stats belonging to this feature. Format: + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name], + example: "user_destination". 
+ objectives (Sequence[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]): + Required. Objectives of the stats to + retrieve. + page_size (int): + The standard list page size. + page_token (str): + A page token received from a previous + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The earliest timestamp of stats being + generated. If not set, indicates fetching stats + till the earliest possible one. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The latest timestamp of stats being + generated. If not set, indicates fetching stats + till the latest possible one. + """ + + class StatsAnomaliesObjective(proto.Message): + r"""Stats requested for specific objective. + + Attributes: + type_ (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): + + top_feature_count (int): + If set, all attribution scores between + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] + and + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] + are fetched, and page token doesn't take effect in this + case. Only used to retrieve attribution score for the top + Features which have the highest attribution score in the + latest monitoring run. 
+ """ + + type_ = proto.Field( + proto.ENUM, + number=1, + enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType, + ) + top_feature_count = proto.Field( + proto.INT32, + number=4, + ) + + model_deployment_monitoring_job = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + feature_display_name = proto.Field( + proto.STRING, + number=3, + ) + objectives = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=StatsAnomaliesObjective, + ) + page_size = proto.Field( + proto.INT32, + number=5, + ) + page_token = proto.Field( + proto.STRING, + number=6, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message): + r"""Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + monitoring_stats (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringStatsAnomalies]): + Stats retrieved for requested objectives. There are at most + 1000 + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] + in the response. + next_page_token (str): + The page token that can be used by the next + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. 
+ """ + + @property + def raw_page(self): + return self + + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelDeploymentMonitoringJobsRequest(proto.Message): + r"""Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelDeploymentMonitoringJobsResponse(proto.Message): + r"""Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. 
+ + Attributes: + model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob]): + A list of ModelDeploymentMonitoringJobs that + matches the specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + model_deployment_monitoring_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring configuration + which replaces the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask is used to specify the fields to + be overwritten in the ModelDeploymentMonitoringJob resource + by the update. The fields specified in the update_mask are + relative to the resource, not the full request. A field will + be overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in the + request will be overwritten. Set the update_mask to ``*`` to + override all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
+ + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + """ + + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the model monitoring job to + delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class PauseModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. 
Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ResumeModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): + r"""Runtime operation information for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py new file mode 100644 index 0000000000..dbaebb298f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'JobState', + }, +) + + +class JobState(proto.Enum): + r"""Describes the state of a job.""" + JOB_STATE_UNSPECIFIED = 0 + JOB_STATE_QUEUED = 1 + JOB_STATE_PENDING = 2 + JOB_STATE_RUNNING = 3 + JOB_STATE_SUCCEEDED = 4 + JOB_STATE_FAILED = 5 + JOB_STATE_CANCELLING = 6 + JOB_STATE_CANCELLED = 7 + JOB_STATE_PAUSED = 8 + JOB_STATE_EXPIRED = 9 + JOB_STATE_UPDATING = 10 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py new file mode 100644 index 0000000000..0e285b1173 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'LineageSubgraph', + }, +) + + +class LineageSubgraph(proto.Message): + r"""A subgraph of the overall lineage graph. Event edges connect + Artifact and Execution nodes. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + The Artifact nodes in the subgraph. + executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): + The Execution nodes in the subgraph. + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Event edges between Artifacts and + Executions in the subgraph. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + executions = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=execution.Execution, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=event.Event, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py new file mode 100644 index 0000000000..704836bef9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'MachineSpec', + 'DedicatedResources', + 'AutomaticResources', + 'BatchDedicatedResources', + 'ResourcesConsumed', + 'DiskSpec', + 'NfsMount', + 'AutoscalingMetricSpec', + }, +) + + +class MachineSpec(proto.Message): + r"""Specification of a single machine. + + Attributes: + machine_type (str): + Immutable. The type of the machine. + + See the `list of machine types supported for + prediction `__ + + See the `list of machine types supported for custom + training `__. + + For + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + this field is optional, and the default value is + ``n1-standard-2``. For + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] + or as part of + [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec] + this field is required. + accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType): + Immutable. The type of accelerator(s) that may be attached + to the machine as per + [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]. + accelerator_count (int): + The number of accelerators to attach to the + machine. + """ + + machine_type = proto.Field( + proto.STRING, + number=1, + ) + accelerator_type = proto.Field( + proto.ENUM, + number=2, + enum=gca_accelerator_type.AcceleratorType, + ) + accelerator_count = proto.Field( + proto.INT32, + number=3, + ) + + +class DedicatedResources(proto.Message): + r"""A description of resources that are dedicated to a + DeployedModel, and that need a higher degree of manual + configuration. + + Attributes: + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Required. Immutable. 
The specification of a + single machine used by the prediction. + min_replica_count (int): + Required. Immutable. The minimum number of + machine replicas this DeployedModel will be + always deployed on. This value must be greater + than or equal to 1. + If traffic against the DeployedModel increases, + it may dynamically be deployed onto more + replicas, and as traffic decreases, some of + these extra replicas may be freed. + max_replica_count (int): + Immutable. The maximum number of replicas this DeployedModel + may be deployed on when the traffic against it increases. If + the requested value is too large, the deployment will error, + but if deployment succeeds then the ability to scale the + model to that many replicas is guaranteed (barring service + outages). If traffic against the DeployedModel increases + beyond what its replicas at maximum may handle, a portion of + the traffic will be dropped. If this value is not provided, + will use + [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] + as the default value. + + The value of this field impacts the charge against Vertex + CPU and GPU quotas. Specifically, you will be charged for + (max_replica_count \* number of cores in the selected + machine type) and (max_replica_count \* number of GPUs per + replica in the selected machine type). + autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]): + Immutable. The metric specifications that overrides a + resource utilization metric (CPU utilization, accelerator's + duty cycle, and so on) target value (default to 60 if not + set). At most one entry is allowed per metric. 
+ + If + [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] + is above 0, the autoscaling will be based on both CPU + utilization and accelerator's duty cycle metrics and scale + up when either metrics exceeds its target value while scale + down if both metrics are under their target value. The + default target value is 60 for both metrics. + + If + [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] + is 0, the autoscaling will be based on CPU utilization + metric only with default target value 60 if not explicitly + set. + + For example, in the case of Online Prediction, if you want + to override target CPU utilization to 80, you should set + [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name] + to + ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + and + [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target] + to ``80``. + """ + + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message='MachineSpec', + ) + min_replica_count = proto.Field( + proto.INT32, + number=2, + ) + max_replica_count = proto.Field( + proto.INT32, + number=3, + ) + autoscaling_metric_specs = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='AutoscalingMetricSpec', + ) + + +class AutomaticResources(proto.Message): + r"""A description of resources that to large degree are decided + by Vertex AI, and require only a modest additional + configuration. Each Model supporting these resources documents + its specific guidelines. + + Attributes: + min_replica_count (int): + Immutable. The minimum number of replicas this DeployedModel + will be always deployed on. 
If traffic against it increases, + it may dynamically be deployed onto more replicas up to + [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], + and as traffic decreases, some of these extra replicas may + be freed. If the requested value is too large, the + deployment will error. + max_replica_count (int): + Immutable. The maximum number of replicas + this DeployedModel may be deployed on when the + traffic against it increases. If the requested + value is too large, the deployment will error, + but if deployment succeeds then the ability to + scale the model to that many replicas is + guaranteed (barring service outages). If traffic + against the DeployedModel increases beyond what + its replicas at maximum may handle, a portion of + the traffic will be dropped. If this value is + not provided, no upper bound for scaling under + heavy traffic will be assumed, though Vertex AI + may be unable to scale beyond certain replica + number. + """ + + min_replica_count = proto.Field( + proto.INT32, + number=1, + ) + max_replica_count = proto.Field( + proto.INT32, + number=2, + ) + + +class BatchDedicatedResources(proto.Message): + r"""A description of resources that are used for performing batch + operations, are dedicated to a Model, and need manual + configuration. + + Attributes: + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Required. Immutable. The specification of a + single machine. + starting_replica_count (int): + Immutable. The number of machine replicas used at the start + of the batch operation. If not set, Vertex AI decides + starting number, not greater than + [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count] + max_replica_count (int): + Immutable. The maximum number of machine + replicas the batch operation may be scaled to. + The default value is 10.
+ """ + + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message='MachineSpec', + ) + starting_replica_count = proto.Field( + proto.INT32, + number=2, + ) + max_replica_count = proto.Field( + proto.INT32, + number=3, + ) + + +class ResourcesConsumed(proto.Message): + r"""Statistics information about resource consumption. + + Attributes: + replica_hours (float): + Output only. The number of replica hours + used. Note that many replicas may run in + parallel, and additionally any given work may be + queued for some time. Therefore this value is + not strictly related to wall time. + """ + + replica_hours = proto.Field( + proto.DOUBLE, + number=1, + ) + + +class DiskSpec(proto.Message): + r"""Represents the spec of disk options. + + Attributes: + boot_disk_type (str): + Type of the boot disk (default is "pd-ssd"). + Valid values: "pd-ssd" (Persistent Disk Solid + State Drive) or "pd-standard" (Persistent Disk + Hard Disk Drive). + boot_disk_size_gb (int): + Size in GB of the boot disk (default is + 100GB). + """ + + boot_disk_type = proto.Field( + proto.STRING, + number=1, + ) + boot_disk_size_gb = proto.Field( + proto.INT32, + number=2, + ) + + +class NfsMount(proto.Message): + r"""Represents a mount configuration for Network File System + (NFS) to mount. + + Attributes: + server (str): + Required. IP address of the NFS server. + path (str): + Required. Source path exported from NFS server. Has to start + with '/', and combined with the ip address, it indicates the + source mount path in the form of ``server:path`` + mount_point (str): + Required. Destination mount path. 
The NFS will be mounted + for the user under /mnt/nfs/ + """ + + server = proto.Field( + proto.STRING, + number=1, + ) + path = proto.Field( + proto.STRING, + number=2, + ) + mount_point = proto.Field( + proto.STRING, + number=3, + ) + + +class AutoscalingMetricSpec(proto.Message): + r"""The metric specification that defines the target resource + utilization (CPU utilization, accelerator's duty cycle, and so + on) for calculating the desired replica count. + + Attributes: + metric_name (str): + Required. The resource metric name. Supported metrics: + + - For Online Prediction: + - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` + - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + target (int): + The target resource utilization in percentage + (1% - 100%) for the given metric; once the real + usage deviates from the target by a certain + percentage, the machine replicas change. The + default value is 60 (representing 60%) if not + provided. + """ + + metric_name = proto.Field( + proto.STRING, + number=1, + ) + target = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py new file mode 100644 index 0000000000..ebe78835bf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ManualBatchTuningParameters', + }, +) + + +class ManualBatchTuningParameters(proto.Message): + r"""Manual batch tuning parameters. + + Attributes: + batch_size (int): + Immutable. The number of the records (e.g. + instances) of the operation given in each batch + to a machine replica. Machine type, and size of + a single record should be considered when + setting this parameter, higher value speeds up + the batch operation's execution, but too high + value will result in a whole batch not fitting + in a machine's memory, and the whole operation + will fail. + The default value is 64. + """ + + batch_size = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py new file mode 100644 index 0000000000..e4c68daaf2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'MetadataSchema', + }, +) + + +class MetadataSchema(proto.Message): + r"""Instance of a general MetadataSchema. + + Attributes: + name (str): + Output only. The resource name of the + MetadataSchema. + schema_version (str): + The version of the MetadataSchema. The version's format must + match the following regular expression: + ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to + order/compare different versions. Example: 1.0.0, 1.0.1, + etc. + schema (str): + Required. The raw YAML string representation of the + MetadataSchema. The combination of [MetadataSchema.version] + and the schema name given by ``title`` in + [MetadataSchema.schema] must be unique within a + MetadataStore. + + The schema is defined as an OpenAPI 3.0.2 `MetadataSchema + Object `__ + schema_type (google.cloud.aiplatform_v1.types.MetadataSchema.MetadataSchemaType): + The type of the MetadataSchema. This is a + property that identifies which metadata types + will use the MetadataSchema. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataSchema was created. 
+ description (str): + Description of the Metadata Schema + """ + class MetadataSchemaType(proto.Enum): + r"""Describes the type of the MetadataSchema.""" + METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 + ARTIFACT_TYPE = 1 + EXECUTION_TYPE = 2 + CONTEXT_TYPE = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + schema_version = proto.Field( + proto.STRING, + number=2, + ) + schema = proto.Field( + proto.STRING, + number=3, + ) + schema_type = proto.Field( + proto.ENUM, + number=4, + enum=MetadataSchemaType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + description = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py new file mode 100644 index 0000000000..01dbe8770c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py @@ -0,0 +1,1477 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateMetadataStoreRequest', + 'CreateMetadataStoreOperationMetadata', + 'GetMetadataStoreRequest', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'DeleteMetadataStoreRequest', + 'DeleteMetadataStoreOperationMetadata', + 'CreateArtifactRequest', + 'GetArtifactRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'UpdateArtifactRequest', + 'DeleteArtifactRequest', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeArtifactsMetadata', + 'CreateContextRequest', + 'GetContextRequest', + 'ListContextsRequest', + 'ListContextsResponse', + 'UpdateContextRequest', + 'DeleteContextRequest', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeContextsMetadata', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'QueryContextLineageSubgraphRequest', + 'CreateExecutionRequest', + 'GetExecutionRequest', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'UpdateExecutionRequest', + 'DeleteExecutionRequest', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'PurgeExecutionsMetadata', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'QueryExecutionInputsAndOutputsRequest', + 'CreateMetadataSchemaRequest', + 'GetMetadataSchemaRequest', + 
'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'QueryArtifactLineageSubgraphRequest', + }, +) + + +class CreateMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + + Attributes: + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): + Required. The MetadataStore to create. + metadata_store_id (str): + The {metadatastore} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataStore.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + metadata_store = proto.Field( + proto.MESSAGE, + number=2, + message=gca_metadata_store.MetadataStore, + ) + metadata_store_id = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for creating a + MetadataStore. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMetadataStoresRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Attributes: + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: ``projects/{project}/locations/{location}`` + page_size (int): + The maximum number of Metadata Stores to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMetadataStoresResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. + + Attributes: + metadata_stores (Sequence[google.cloud.aiplatform_v1.types.MetadataStore]): + The MetadataStores found for the Location. 
+ next_page_token (str): + A token, which can be sent as + [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1.ListMetadataStoresRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_stores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_metadata_store.MetadataStore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the MetadataStore to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + force (bool): + Deprecated: Field is no longer supported. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class DeleteMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for deleting a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact to create. 
+ artifact_id (str): + The {artifact} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Artifacts in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Artifact.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + artifact = proto.Field( + proto.MESSAGE, + number=2, + message=gca_artifact.Artifact, + ) + artifact_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Attributes: + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Artifacts to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. 
(Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + The supported set of filters include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + The Artifacts retrieved from the + MetadataStore. 
+ next_page_token (str): + A token, which can be sent as + [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1.ListArtifactsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. + + Attributes: + artifact (google.cloud.aiplatform_v1.types.Artifact): + Required. The Artifact containing updates. The Artifact's + [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Artifact][google.cloud.aiplatform.v1.Artifact] is not + found, a new [Artifact][google.cloud.aiplatform.v1.Artifact] + is created. + """ + + artifact = proto.Field( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to delete. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + etag (str): + Optional. The etag of the Artifact to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + etag = proto.Field( + proto.STRING, + number=2, + ) + + +class PurgeArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + parent (str): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Artifacts to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Artifact names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + purge_count (int): + The number of Artifacts that this request deleted (or, if + ``force`` is false, the number of Artifacts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Artifact names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). 
+ """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeArtifactsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Artifacts. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateContextRequest(proto.Message): + r"""Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context to create. + context_id (str): + The {context} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Contexts in the parent MetadataStore. (Otherwise the request + will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the + caller can't view the preexisting Context.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + context = proto.Field( + proto.MESSAGE, + number=2, + message=gca_context.Context, + ) + context_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetContextRequest(proto.Message): + r"""Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. + + Attributes: + name (str): + Required. 
The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListContextsRequest(proto.Message): + r"""Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + + Attributes: + parent (str): + Required. The MetadataStore whose Contexts should be listed. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Contexts to return. The + service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Contexts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + Following are the supported set of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. 
+ + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as + follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListContextsResponse(proto.Message): + r"""Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. + + Attributes: + contexts (Sequence[google.cloud.aiplatform_v1.types.Context]): + The Contexts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListContextsRequest.page_token][google.cloud.aiplatform.v1.ListContextsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + contexts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_context.Context, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateContextRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. + + Attributes: + context (google.cloud.aiplatform_v1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1.Context.name] + field is used to identify the Context to be updated. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Context][google.cloud.aiplatform.v1.Context] is not found, + a new [Context][google.cloud.aiplatform.v1.Context] is + created. + """ + + context = proto.Field( + proto.MESSAGE, + number=1, + message=gca_context.Context, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteContextRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. + + Attributes: + name (str): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + force (bool): + The force deletion semantics is still + undefined. Users should not use this field. + etag (str): + Optional. The etag of the Context to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + etag = proto.Field( + proto.STRING, + number=3, + ) + + +class PurgeContextsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + Attributes: + parent (str): + Required. The metadata store to purge Contexts from. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Contexts to be + purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Context names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeContextsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + Attributes: + purge_count (int): + The number of Contexts that this request deleted (or, if + ``force`` is false, the number of Contexts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Context names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeContextsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Contexts. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class AddContextArtifactsAndExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + Attributes: + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + executions (Sequence[str]): + The resource names of the Executions to associate with the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + artifacts = proto.RepeatedField( + proto.STRING, + number=2, + ) + executions = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class AddContextArtifactsAndExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + + +class AddContextChildrenRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + Attributes: + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + child_contexts (Sequence[str]): + The resource names of the child Contexts. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + child_contexts = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class AddContextChildrenResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. + + """ + + +class QueryContextLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. + + Attributes: + context (str): + Required. 
The resource name of the Context whose Artifacts + and Executions should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution to create. + execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Executions in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Execution.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.MESSAGE, + number=2, + message=gca_execution.Execution, + ) + execution_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution to retrieve. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Attributes: + parent (str): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Executions to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with an INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Executions + to satisfy in order to be part of the result set. The syntax + to define filter query is based on + https://google.aip.dev/160. Following are the supported set + of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. 
+ - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Executions based + on the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. + + Attributes: + executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): + The Executions retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1.ListExecutionsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + executions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. + + Attributes: + execution (google.cloud.aiplatform_v1.types.Execution): + Required. The Execution containing updates. 
The Execution's + [Execution.name][google.cloud.aiplatform.v1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Execution][google.cloud.aiplatform.v1.Execution] is not + found, a new + [Execution][google.cloud.aiplatform.v1.Execution] is + created. + """ + + execution = proto.Field( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + etag (str): + Optional. The etag of the Execution to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + etag = proto.Field( + proto.STRING, + number=2, + ) + + +class PurgeExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + Attributes: + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Executions to be + purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Execution names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + Attributes: + purge_count (int): + The number of Executions that this request deleted (or, if + ``force`` is false, the number of Executions that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Execution names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeExecutionsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for purging Executions. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class AddExecutionEventsRequest(proto.Message): + r"""Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + Attributes: + execution (str): + Required. The resource name of the Execution that the Events + connect Artifacts with. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + events (Sequence[google.cloud.aiplatform_v1.types.Event]): + The Events to create and add. + """ + + execution = proto.Field( + proto.STRING, + number=1, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=event.Event, + ) + + +class AddExecutionEventsResponse(proto.Message): + r"""Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. + + """ + + +class QueryExecutionInputsAndOutputsRequest(proto.Message): + r"""Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. + + Attributes: + execution (str): + Required. The resource name of the Execution whose input and + output Artifacts should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + execution = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): + Required. The MetadataSchema to create. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be + unique across all MetadataSchemas in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataSchema.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + metadata_schema = proto.Field( + proto.MESSAGE, + number=2, + message=gca_metadata_schema.MetadataSchema, + ) + metadata_schema_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. + + Attributes: + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMetadataSchemasRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Attributes: + parent (str): + Required. The MetadataStore whose MetadataSchemas should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of MetadataSchemas to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas] + call. Provide this to retrieve the next page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + A query to filter available MetadataSchemas + for matching results. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListMetadataSchemasResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. + + Attributes: + metadata_schemas (Sequence[google.cloud.aiplatform_v1.types.MetadataSchema]): + The MetadataSchemas found for the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1.ListMetadataSchemasRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_schemas = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_metadata_schema.MetadataSchema, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class QueryArtifactLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. + + Attributes: + artifact (str): + Required. The resource name of the Artifact whose Lineage + needs to be retrieved as a LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + max_hops (int): + Specifies the size of the lineage graph in terms of number + of hops from the specified artifact. Negative Value: + INVALID_ARGUMENT error is returned 0: Only input artifact is + returned. 
No value: Transitive closure is performed to + return the complete graph. + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the Lineage Subgraph. The + syntax to define filter query is based on + https://google.aip.dev/160. The supported set of filters + include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + artifact = proto.Field( + proto.STRING, + number=1, + ) + max_hops = proto.Field( + proto.INT32, + number=2, + ) + filter = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py new file mode 100644 index 0000000000..5c4e8bf4c5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
from google.protobuf import timestamp_pb2  # type: ignore


# Registers the messages below under the google.cloud.aiplatform.v1 proto
# package; __all__ at the bottom of the module is derived from this manifest.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1',
    manifest={
        'MetadataStore',
    },
)


class MetadataStore(proto.Message):
    r"""Instance of a metadata store. Contains a set of metadata that
    can be queried.

    Attributes:
        name (str):
            Output only. The resource name of the
            MetadataStore instance.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            MetadataStore was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            MetadataStore was last updated.
        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
            Customer-managed encryption key spec for a
            Metadata Store. If set, this Metadata Store and
            all sub-resources of this Metadata Store are
            secured using this key.
        description (str):
            Description of the MetadataStore.
        state (google.cloud.aiplatform_v1.types.MetadataStore.MetadataStoreState):
            Output only. State information of the
            MetadataStore.
    """

    class MetadataStoreState(proto.Message):
        r"""Represents state information for a MetadataStore.

        Attributes:
            disk_utilization_bytes (int):
                The disk utilization of the MetadataStore in
                bytes.
        """

        disk_utilization_bytes = proto.Field(
            proto.INT64,
            number=1,
        )

    name = proto.Field(
        proto.STRING,
        number=1,
    )
    # NOTE: proto field number 2 is not used by this message; numbers jump
    # from 1 to 3 as declared in the upstream .proto.
    create_time = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    update_time = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    encryption_spec = proto.Field(
        proto.MESSAGE,
        number=5,
        message=gca_encryption_spec.EncryptionSpec,
    )
    description = proto.Field(
        proto.STRING,
        number=6,
    )
    state = proto.Field(
        proto.MESSAGE,
        number=7,
        message=MetadataStoreState,
    )


__all__ = tuple(sorted(__protobuf__.manifest))


# --- new file in patch: owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py ---
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.protobuf import timestamp_pb2  # type: ignore


__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1',
    manifest={
        'MigratableResource',
    },
)


class MigratableResource(proto.Message):
    r"""Represents one resource that exists in automl.googleapis.com,
    datalabeling.googleapis.com or ml.googleapis.com.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        ml_engine_model_version (google.cloud.aiplatform_v1.types.MigratableResource.MlEngineModelVersion):
            Output only. Represents one Version in
            ml.googleapis.com.

            This field is a member of `oneof`_ ``resource``.
        automl_model (google.cloud.aiplatform_v1.types.MigratableResource.AutomlModel):
            Output only. Represents one Model in
            automl.googleapis.com.

            This field is a member of `oneof`_ ``resource``.
        automl_dataset (google.cloud.aiplatform_v1.types.MigratableResource.AutomlDataset):
            Output only. Represents one Dataset in
            automl.googleapis.com.

            This field is a member of `oneof`_ ``resource``.
        data_labeling_dataset (google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset):
            Output only. Represents one Dataset in
            datalabeling.googleapis.com.

            This field is a member of `oneof`_ ``resource``.
        last_migrate_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when the last
            migration attempt on this MigratableResource
            started. Will not be set if there's no migration
            attempt on this MigratableResource.
        last_update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            MigratableResource was last updated.
    """

    class MlEngineModelVersion(proto.Message):
        r"""Represents one model Version in ml.googleapis.com.

        Attributes:
            endpoint (str):
                The ml.googleapis.com endpoint that this model Version
                currently lives in. Example values:

                - ml.googleapis.com
                - us-central1-ml.googleapis.com
                - europe-west4-ml.googleapis.com
                - asia-east1-ml.googleapis.com
            version (str):
                Full resource name of ml engine model Version. Format:
                ``projects/{project}/models/{model}/versions/{version}``.
        """

        endpoint = proto.Field(
            proto.STRING,
            number=1,
        )
        version = proto.Field(
            proto.STRING,
            number=2,
        )

    class AutomlModel(proto.Message):
        r"""Represents one Model in automl.googleapis.com.

        Attributes:
            model (str):
                Full resource name of automl Model. Format:
                ``projects/{project}/locations/{location}/models/{model}``.
            model_display_name (str):
                The Model's display name in
                automl.googleapis.com.
        """

        model = proto.Field(
            proto.STRING,
            number=1,
        )
        # NOTE: field number 2 is skipped in the upstream .proto.
        model_display_name = proto.Field(
            proto.STRING,
            number=3,
        )

    class AutomlDataset(proto.Message):
        r"""Represents one Dataset in automl.googleapis.com.

        Attributes:
            dataset (str):
                Full resource name of automl Dataset. Format:
                ``projects/{project}/locations/{location}/datasets/{dataset}``.
            dataset_display_name (str):
                The Dataset's display name in
                automl.googleapis.com.
        """

        dataset = proto.Field(
            proto.STRING,
            number=1,
        )
        # NOTE: field numbers 2-3 are skipped in the upstream .proto.
        dataset_display_name = proto.Field(
            proto.STRING,
            number=4,
        )

    class DataLabelingDataset(proto.Message):
        r"""Represents one Dataset in datalabeling.googleapis.com.

        Attributes:
            dataset (str):
                Full resource name of data labeling Dataset. Format:
                ``projects/{project}/datasets/{dataset}``.
            dataset_display_name (str):
                The Dataset's display name in
                datalabeling.googleapis.com.
            data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]):
                The migratable AnnotatedDataset in
                datalabeling.googleapis.com belongs to the data
                labeling Dataset.
        """

        class DataLabelingAnnotatedDataset(proto.Message):
            r"""Represents one AnnotatedDataset in
            datalabeling.googleapis.com.

            Attributes:
                annotated_dataset (str):
                    Full resource name of data labeling AnnotatedDataset.
                    Format:
                    ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
                annotated_dataset_display_name (str):
                    The AnnotatedDataset's display name in
                    datalabeling.googleapis.com.
            """

            annotated_dataset = proto.Field(
                proto.STRING,
                number=1,
            )
            annotated_dataset_display_name = proto.Field(
                proto.STRING,
                number=3,
            )

        dataset = proto.Field(
            proto.STRING,
            number=1,
        )
        dataset_display_name = proto.Field(
            proto.STRING,
            number=4,
        )
        data_labeling_annotated_datasets = proto.RepeatedField(
            proto.MESSAGE,
            number=3,
            message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset',
        )

    # Members of the 'resource' oneof: at most one of the following four
    # fields is set on any given instance.
    ml_engine_model_version = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof='resource',
        message=MlEngineModelVersion,
    )
    automl_model = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof='resource',
        message=AutomlModel,
    )
    automl_dataset = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof='resource',
        message=AutomlDataset,
    )
    data_labeling_dataset = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof='resource',
        message=DataLabelingDataset,
    )
    last_migrate_time = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    last_update_time = proto.Field(
        proto.MESSAGE,
        number=6,
        message=timestamp_pb2.Timestamp,
    )


__all__ = tuple(sorted(__protobuf__.manifest))


# --- new file in patch: owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py ---
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.cloud.aiplatform_v1.types import migratable_resource as gca_migratable_resource
from google.cloud.aiplatform_v1.types import operation
from google.rpc import status_pb2  # type: ignore


# Registers the request/response messages for MigrationService under the
# google.cloud.aiplatform.v1 proto package.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1',
    manifest={
        'SearchMigratableResourcesRequest',
        'SearchMigratableResourcesResponse',
        'BatchMigrateResourcesRequest',
        'MigrateResourceRequest',
        'BatchMigrateResourcesResponse',
        'MigrateResourceResponse',
        'BatchMigrateResourcesOperationMetadata',
    },
)


class SearchMigratableResourcesRequest(proto.Message):
    r"""Request message for
    [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].

    Attributes:
        parent (str):
            Required. The location that the migratable resources should
            be searched from. It's the Vertex AI location that the
            resources can be migrated to, not the resources' original
            location. Format:
            ``projects/{project}/locations/{location}``
        page_size (int):
            The standard page size.
            The default and maximum value is 100.
        page_token (str):
            The standard page token.
        filter (str):
            A filter for your search. You can use the following types of
            filters:

            - Resource type filters. The following strings filter for a
              specific type of
              [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]:

              - ``ml_engine_model_version:*``
              - ``automl_model:*``
              - ``automl_dataset:*``
              - ``data_labeling_dataset:*``

            - "Migrated or not" filters. The following strings filter
              for resources that either have or have not already been
              migrated:

              - ``last_migrate_time:*`` filters for migrated
                resources.
              - ``NOT last_migrate_time:*`` filters for not yet
                migrated resources.
    """

    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token = proto.Field(
        proto.STRING,
        number=3,
    )
    filter = proto.Field(
        proto.STRING,
        number=4,
    )


class SearchMigratableResourcesResponse(proto.Message):
    r"""Response message for
    [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].

    Attributes:
        migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]):
            All migratable resources that can be migrated
            to the location specified in the request.
        next_page_token (str):
            The standard next-page token. The migratable_resources may
            not fill page_size in SearchMigratableResourcesRequest even
            when there are subsequent pages.
    """

    @property
    def raw_page(self):
        # The response object itself serves as the raw page for the
        # generated list pagers.
        return self

    migratable_resources = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gca_migratable_resource.MigratableResource,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )


class BatchMigrateResourcesRequest(proto.Message):
    r"""Request message for
    [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].

    Attributes:
        parent (str):
            Required. The location of the migrated resource will live
            in. Format: ``projects/{project}/locations/{location}``
        migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
            Required. The request messages specifying the
            resources to migrate. They must be in the same
            location as the destination. Up to 50 resources
            can be migrated in one batch.
    """

    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    migrate_resource_requests = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message='MigrateResourceRequest',
    )


class MigrateResourceRequest(proto.Message):
    r"""Config of migrating one resource from automl.googleapis.com,
    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig):
            Config for migrating Version in
            ml.googleapis.com to Vertex AI's Model.

            This field is a member of `oneof`_ ``request``.
        migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig):
            Config for migrating Model in
            automl.googleapis.com to Vertex AI's Model.

            This field is a member of `oneof`_ ``request``.
        migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig):
            Config for migrating Dataset in
            automl.googleapis.com to Vertex AI's Dataset.

            This field is a member of `oneof`_ ``request``.
        migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig):
            Config for migrating Dataset in
            datalabeling.googleapis.com to Vertex AI's
            Dataset.

            This field is a member of `oneof`_ ``request``.
    """

    class MigrateMlEngineModelVersionConfig(proto.Message):
        r"""Config for migrating version in ml.googleapis.com to Vertex
        AI's Model.

        Attributes:
            endpoint (str):
                Required. The ml.googleapis.com endpoint that this model
                version should be migrated from. Example values:

                - ml.googleapis.com

                - us-central1-ml.googleapis.com

                - europe-west4-ml.googleapis.com

                - asia-east1-ml.googleapis.com
            model_version (str):
                Required. Full resource name of ml engine model version.
                Format:
                ``projects/{project}/models/{model}/versions/{version}``.
            model_display_name (str):
                Required. Display name of the model in Vertex
                AI. System will pick a display name if
                unspecified.
        """
        # NOTE(review): the generated doc above says both "Required" and
        # "if unspecified" for model_display_name — contradictory wording
        # inherited from the upstream proto; flag upstream rather than
        # editing generated code.

        endpoint = proto.Field(
            proto.STRING,
            number=1,
        )
        model_version = proto.Field(
            proto.STRING,
            number=2,
        )
        model_display_name = proto.Field(
            proto.STRING,
            number=3,
        )

    class MigrateAutomlModelConfig(proto.Message):
        r"""Config for migrating Model in automl.googleapis.com to Vertex
        AI's Model.

        Attributes:
            model (str):
                Required. Full resource name of automl Model. Format:
                ``projects/{project}/locations/{location}/models/{model}``.
            model_display_name (str):
                Optional. Display name of the model in Vertex
                AI. System will pick a display name if
                unspecified.
        """

        model = proto.Field(
            proto.STRING,
            number=1,
        )
        model_display_name = proto.Field(
            proto.STRING,
            number=2,
        )

    class MigrateAutomlDatasetConfig(proto.Message):
        r"""Config for migrating Dataset in automl.googleapis.com to
        Vertex AI's Dataset.

        Attributes:
            dataset (str):
                Required. Full resource name of automl Dataset. Format:
                ``projects/{project}/locations/{location}/datasets/{dataset}``.
            dataset_display_name (str):
                Required. Display name of the Dataset in
                Vertex AI. System will pick a display name if
                unspecified.
        """

        dataset = proto.Field(
            proto.STRING,
            number=1,
        )
        dataset_display_name = proto.Field(
            proto.STRING,
            number=2,
        )

    class MigrateDataLabelingDatasetConfig(proto.Message):
        r"""Config for migrating Dataset in datalabeling.googleapis.com
        to Vertex AI's Dataset.

        Attributes:
            dataset (str):
                Required. Full resource name of data labeling Dataset.
                Format: ``projects/{project}/datasets/{dataset}``.
            dataset_display_name (str):
                Optional. Display name of the Dataset in
                Vertex AI. System will pick a display name if
                unspecified.
            migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]):
                Optional. Configs for migrating
                AnnotatedDataset in datalabeling.googleapis.com
                to Vertex AI's SavedQuery. The specified
                AnnotatedDatasets have to belong to the
                datalabeling Dataset.
        """

        class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message):
            r"""Config for migrating AnnotatedDataset in
            datalabeling.googleapis.com to Vertex AI's SavedQuery.

            Attributes:
                annotated_dataset (str):
                    Required. Full resource name of data labeling
                    AnnotatedDataset. Format:
                    ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
            """

            annotated_dataset = proto.Field(
                proto.STRING,
                number=1,
            )

        dataset = proto.Field(
            proto.STRING,
            number=1,
        )
        dataset_display_name = proto.Field(
            proto.STRING,
            number=2,
        )
        migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField(
            proto.MESSAGE,
            number=3,
            message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig',
        )

    # Members of the 'request' oneof: exactly one migration config may be
    # set per MigrateResourceRequest.
    migrate_ml_engine_model_version_config = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof='request',
        message=MigrateMlEngineModelVersionConfig,
    )
    migrate_automl_model_config = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof='request',
        message=MigrateAutomlModelConfig,
    )
    migrate_automl_dataset_config = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof='request',
        message=MigrateAutomlDatasetConfig,
    )
    migrate_data_labeling_dataset_config = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof='request',
        message=MigrateDataLabelingDatasetConfig,
    )


class BatchMigrateResourcesResponse(proto.Message):
    r"""Response message for
    [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].

    Attributes:
        migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]):
            Successfully migrated resources.
    """

    migrate_resource_responses = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='MigrateResourceResponse',
    )


class MigrateResourceResponse(proto.Message):
    r"""Describes a successfully migrated resource.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        dataset (str):
            Migrated Dataset's resource name.

            This field is a member of `oneof`_ ``migrated_resource``.
        model (str):
            Migrated Model's resource name.

            This field is a member of `oneof`_ ``migrated_resource``.
        migratable_resource (google.cloud.aiplatform_v1.types.MigratableResource):
            Before migration, the identifier in
            ml.googleapis.com, automl.googleapis.com or
            datalabeling.googleapis.com.
    """

    dataset = proto.Field(
        proto.STRING,
        number=1,
        oneof='migrated_resource',
    )
    model = proto.Field(
        proto.STRING,
        number=2,
        oneof='migrated_resource',
    )
    migratable_resource = proto.Field(
        proto.MESSAGE,
        number=3,
        message=gca_migratable_resource.MigratableResource,
    )


class BatchMigrateResourcesOperationMetadata(proto.Message):
    r"""Runtime operation information for
    [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].

    Attributes:
        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
            The common part of the operation metadata.
        partial_results (Sequence[google.cloud.aiplatform_v1.types.BatchMigrateResourcesOperationMetadata.PartialResult]):
            Partial results that reflect the latest
            migration operation progress.
    """

    class PartialResult(proto.Message):
        r"""Represents a partial result in batch migration operation for one
        [MigrateResourceRequest][google.cloud.aiplatform.v1.MigrateResourceRequest].

        This message has `oneof`_ fields (mutually exclusive fields).
        For each oneof, at most one member field can be set at the same time.
        Setting any member of the oneof automatically clears all other
        members.

        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

        Attributes:
            error (google.rpc.status_pb2.Status):
                The error result of the migration request in
                case of failure.

                This field is a member of `oneof`_ ``result``.
            model (str):
                Migrated model resource name.

                This field is a member of `oneof`_ ``result``.
            dataset (str):
                Migrated dataset resource name.

                This field is a member of `oneof`_ ``result``.
            request (google.cloud.aiplatform_v1.types.MigrateResourceRequest):
                It's the same as the value in
                [MigrateResourceRequest.migrate_resource_requests][].
        """

        error = proto.Field(
            proto.MESSAGE,
            number=2,
            oneof='result',
            message=status_pb2.Status,
        )
        model = proto.Field(
            proto.STRING,
            number=3,
            oneof='result',
        )
        dataset = proto.Field(
            proto.STRING,
            number=4,
            oneof='result',
        )
        # The originating request; proto field 1, declared after the
        # 'result' oneof members (2-4) and not part of that oneof.
        request = proto.Field(
            proto.MESSAGE,
            number=1,
            message='MigrateResourceRequest',
        )

    generic_metadata = proto.Field(
        proto.MESSAGE,
        number=1,
        message=operation.GenericOperationMetadata,
    )
    partial_results = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=PartialResult,
    )


__all__ = tuple(sorted(__protobuf__.manifest))


# --- new file in patch: owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py ---
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import explanation +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Model', + 'PredictSchemata', + 'ModelContainerSpec', + 'Port', + }, +) + + +class Model(proto.Message): + r"""A trained machine learning Model. + + Attributes: + name (str): + The resource name of the Model. + version_id (str): + Output only. Immutable. The version ID of the + model. A new version is committed when a new + model version is uploaded or trained under an + existing model id. It is an auto-incrementing + decimal number in string representation. + version_aliases (Sequence[str]): + User provided version aliases so that a model version can be + referenced via alias (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_alias} + instead of auto-generated version id (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_id}). + The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to + distinguish from version_id. A default version alias will be + created for the first version of the model, and there must + be exactly one default version alias for a model. + version_create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this version was + created. + version_update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this version was + most recently updated. + display_name (str): + Required. The display name of the Model. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Model. 
+ version_description (str): + The description of this version. + predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): + The schemata that describe formats of the Model's + predictions and explanations as given and returned via + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + and + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + metadata_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing additional information about the Model, + that is specific to it. Unset if the Model does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no additional metadata is needed, this field is set to an + empty string. Note: The URI given on output will be + immutable and probably different, including the URI scheme, + than the one given on input. The output URI will point to a + location where the user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + Immutable. An additional information about the Model; the + schema of the metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri]. + Unset if the Model does not have any additional information. + supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]): + Output only. The formats in which this Model + may be exported. If empty, this Model is not + available for export. + training_pipeline (str): + Output only. The resource name of the + TrainingPipeline that uploaded this Model, if + any. + container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec): + Input only. The specification of the container that is to be + used when deploying this Model. 
The specification is + ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], + and all binaries it contains are copied and stored + internally by Vertex AI. Not present for AutoML Models. + artifact_uri (str): + Immutable. The path to the directory + containing the Model artifact and any of its + supporting files. Not present for AutoML Models. + supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1.types.Model.DeploymentResourcesType]): + Output only. When this Model is deployed, its prediction + resources are described by the ``prediction_resources`` + field of the + [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + object. Because not all Models support all resource + configuration types, the configuration types this Model + supports are listed here. If no configuration types are + listed, the Model cannot be deployed to an + [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not + support online predictions + ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]). + Such a Model can serve predictions by using a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], + if it has at least one entry each in + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] + and + [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. + supported_input_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + exists, the instances should be given as per that schema. 
+ + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each instance is a + single line. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``csv`` The CSV format, where each instance is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record`` The TFRecord format, where each instance is + a single record in tfrecord syntax. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record-gzip`` Similar to ``tf-record``, but the file + is gzipped. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. + + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] + object. + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + supported_output_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. 
+ If both + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] + exist, the predictions are returned together with their + instances. In other words, the prediction has the original + instance data first, followed by the actual prediction + content (as per the schema). + + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``bigquery`` Each prediction is a single row in a + BigQuery table, uses + [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] + . + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + uploaded into Vertex AI. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + most recently updated. + deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModelRef]): + Output only. 
The pointers to DeployedModels + created from this Model. Note that Model could + have been deployed to Endpoints in different + Locations. + explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): + The default explanation specification for this Model. + + The Model can be used for [requesting + explanation][PredictionService.Explain] after being + [deployed][google.cloud.aiplatform.v1.EndpointService.DeployModel] + if it is populated. The Model can be used for [batch + explanation][BatchPredictionJob.generate_explanation] if it + is populated. + + All fields of the explanation_spec can be overridden by + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model], + or + [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + + If the default explanation specification is not set for this + Model, this Model can still be used for [requesting + explanation][PredictionService.Explain] by setting + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model] + and for [batch + explanation][BatchPredictionJob.generate_explanation] by + setting + [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Models. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. 
International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Model. If set, this Model and all sub-resources + of this Model will be secured by this key. + """ + class DeploymentResourcesType(proto.Enum): + r"""Identifies a type of Model's prediction resources.""" + DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 + DEDICATED_RESOURCES = 1 + AUTOMATIC_RESOURCES = 2 + + class ExportFormat(proto.Message): + r"""Represents export format supported by the Model. + All formats export to Google Cloud Storage. + + Attributes: + id (str): + Output only. The ID of the export format. The possible + format IDs are: + + - ``tflite`` Used for Android mobile devices. + + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. + + - ``tf-saved-model`` A tensorflow model in SavedModel + format. + + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - ``core-ml`` Used for iOS mobile devices. + + - ``custom-trained`` A Model that was uploaded or trained + by custom code. + exportable_contents (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]): + Output only. The content of this Model that + may be exported. 
+ """ + class ExportableContent(proto.Enum): + r"""The Model content that can be exported.""" + EXPORTABLE_CONTENT_UNSPECIFIED = 0 + ARTIFACT = 1 + IMAGE = 2 + + id = proto.Field( + proto.STRING, + number=1, + ) + exportable_contents = proto.RepeatedField( + proto.ENUM, + number=2, + enum='Model.ExportFormat.ExportableContent', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + version_id = proto.Field( + proto.STRING, + number=28, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=29, + ) + version_create_time = proto.Field( + proto.MESSAGE, + number=31, + message=timestamp_pb2.Timestamp, + ) + version_update_time = proto.Field( + proto.MESSAGE, + number=32, + message=timestamp_pb2.Timestamp, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + version_description = proto.Field( + proto.STRING, + number=30, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=4, + message='PredictSchemata', + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=5, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + supported_export_formats = proto.RepeatedField( + proto.MESSAGE, + number=20, + message=ExportFormat, + ) + training_pipeline = proto.Field( + proto.STRING, + number=7, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=9, + message='ModelContainerSpec', + ) + artifact_uri = proto.Field( + proto.STRING, + number=26, + ) + supported_deployment_resources_types = proto.RepeatedField( + proto.ENUM, + number=10, + enum=DeploymentResourcesType, + ) + supported_input_storage_formats = proto.RepeatedField( + proto.STRING, + number=11, + ) + supported_output_storage_formats = proto.RepeatedField( + proto.STRING, + number=12, + ) + create_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + 
message=timestamp_pb2.Timestamp, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=15, + message=deployed_model_ref.DeployedModelRef, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=23, + message=explanation.ExplanationSpec, + ) + etag = proto.Field( + proto.STRING, + number=16, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=17, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class PredictSchemata(proto.Message): + r"""Contains the schemata used in Model's predictions and explanations + via + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain] + and + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + + Attributes: + instance_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single instance, which + are used in + [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], + [ExplainRequest.instances][google.cloud.aiplatform.v1.ExplainRequest.instances] + and + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + parameters_schema_uri (str): + Immutable. 
Points to a YAML file stored on Google Cloud + Storage describing the parameters of prediction and + explanation via + [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], + [ExplainRequest.parameters][google.cloud.aiplatform.v1.ExplainRequest.parameters] + and + [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has a read access. + prediction_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single prediction + produced by this Model, which are returned via + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], + [ExplainResponse.explanations][google.cloud.aiplatform.v1.ExplainResponse.explanations], + and + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + """ + + instance_schema_uri = proto.Field( + proto.STRING, + number=1, + ) + parameters_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + prediction_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelContainerSpec(proto.Message): + r"""Specification of a container for serving predictions. 
Some fields in + this message correspond to fields in the `Kubernetes Container v1 + core + specification `__. + + Attributes: + image_uri (str): + Required. Immutable. URI of the Docker image to be used as + the custom container for serving predictions. This URI must + identify an image in Artifact Registry or Container + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the Vertex AI Service + Agent. + + The container image is ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], + stored internally, and this original path is afterwards not + used. + + To learn about the requirements for the Docker image itself, + see `Custom container + requirements `__. + + You can use the URI to one of Vertex AI's `pre-built + container images for + prediction `__ + in this field. + command (Sequence[str]): + Immutable. Specifies the command that runs when the + container starts. This overrides the container's + `ENTRYPOINT `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``ENTRYPOINT``'s "exec" form, not its + "shell" form. + + If you do not specify this field, then the container's + ``ENTRYPOINT`` runs, in conjunction with the + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] + field or the container's + ```CMD`` `__, + if either exists. If this field is not specified and the + container does not have an ``ENTRYPOINT``, then refer to the + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. + + If you specify this field, then you can also specify the + ``args`` field to provide additional arguments for this + command. However, if you specify this field, then the + container's ``CMD`` is ignored. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. 
+ + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``command`` field of the Kubernetes + Containers `v1 core + API `__. + args (Sequence[str]): + Immutable. Specifies arguments for the command that runs + when the container starts. This overrides the container's + ```CMD`` `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``CMD``'s "default parameters" form. + + If you don't specify this field but do specify the + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] + field, then the command from the ``command`` field runs + without any additional arguments. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + If you don't specify this field and don't specify the + ``command`` field, then the container's + ```ENTRYPOINT`` `__ + and ``CMD`` determine what runs based on their default + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. 
In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``args`` field of the Kubernetes + Containers `v1 core + API `__. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Immutable. List of environment variables to set in the + container. After the container starts running, code running + in the container can read these environment variables. + + Additionally, the + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] + and + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] + fields can reference these variables. Later entries in this + list can also reference earlier entries. For example, the + following example sets the variable ``VAR_2`` to have the + value ``foo bar``: + + .. code:: json + + [ + { + "name": "VAR_1", + "value": "foo" + }, + { + "name": "VAR_2", + "value": "$(VAR_1) bar" + } + ] + + If you switch the order of the variables in the example, + then the expansion does not occur. + + This field corresponds to the ``env`` field of the + Kubernetes Containers `v1 core + API `__. + ports (Sequence[google.cloud.aiplatform_v1.types.Port]): + Immutable. List of ports to expose from the container. + Vertex AI sends any prediction requests that it receives to + the first port on this list. Vertex AI also sends `liveness + and health + checks `__ + to this port. + + If you do not specify this field, it defaults to following + value: + + .. code:: json + + [ + { + "containerPort": 8080 + } + ] + + Vertex AI does not use ports other than the first one + listed. This field corresponds to the ``ports`` field of the + Kubernetes Containers `v1 core + API `__. 
+ predict_route (str): + Immutable. HTTP path on the container to send prediction + requests to. Vertex AI forwards requests sent using + [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] + to this path on the container's IP address and port. Vertex + AI then returns the container's response in the API + response. + + For example, if you set this field to ``/foo``, then when + Vertex AI receives a prediction request, it forwards the + request body in a POST request to the ``/foo`` path on the + port of your container specified by the first value of this + ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] + field. + + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + health_route (str): + Immutable. HTTP path on the container to send health checks + to. Vertex AI intermittently sends GET requests to this path + on the container's IP address and port to check that the + container is healthy. Read more about `health + checks `__. 
+ + For example, if you set this field to ``/bar``, then Vertex + AI intermittently sends a GET request to the ``/bar`` path + on the port of your container specified by the first value + of this ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] + field. + + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + ports = proto.RepeatedField( + proto.MESSAGE, + number=5, + message='Port', + ) + predict_route = proto.Field( + proto.STRING, + number=6, + ) + health_route = proto.Field( + proto.STRING, + number=7, + ) + + +class Port(proto.Message): + r"""Represents a network port in a container. + + Attributes: + container_port (int): + The number of the port to expose on the pod's + IP address. Must be a valid port number, between + 1 and 65535 inclusive. 
+ """ + + container_port = proto.Field( + proto.INT32, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py new file mode 100644 index 0000000000..eced06f053 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -0,0 +1,490 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import model_monitoring +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelDeploymentMonitoringObjectiveType', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + }, +) + + +class ModelDeploymentMonitoringObjectiveType(proto.Enum): + r"""The Model Monitoring Objective types.""" + MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 + RAW_FEATURE_SKEW = 1 + RAW_FEATURE_DRIFT = 2 + FEATURE_ATTRIBUTION_SKEW = 3 + FEATURE_ATTRIBUTION_DRIFT = 4 + + +class ModelDeploymentMonitoringJob(proto.Message): + r"""Represents a job that runs periodically to monitor the + deployed models in an endpoint. It will analyze the logged + training & prediction data to detect any abnormal behaviors. + + Attributes: + name (str): + Output only. Resource name of a + ModelDeploymentMonitoringJob. + display_name (str): + Required. The user-defined name of the + ModelDeploymentMonitoringJob. The name can be up + to 128 characters long and can be consist of any + UTF-8 characters. + Display name of a ModelDeploymentMonitoringJob. + endpoint (str): + Required. Endpoint resource name. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + state (google.cloud.aiplatform_v1.types.JobState): + Output only. 
The detailed state of the + monitoring job. When the job is still creating, + the state will be 'PENDING'. Once the job is + successfully created, the state will be + 'RUNNING'. Pause the job, the state will be + 'PAUSED'. + Resume the job, the state will return to + 'RUNNING'. + schedule_state (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): + Output only. Schedule state when the + monitoring job is in Running state. + latest_monitoring_pipeline_metadata (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.LatestMonitoringPipelineMetadata): + Output only. Latest triggered monitoring + pipeline metadata. + model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveConfig]): + Required. The config for monitoring + objectives. This is a per DeployedModel config. + Each DeployedModel needs to be configured + separately. + model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringScheduleConfig): + Required. Schedule config for running the + monitoring job. + logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy): + Required. Sample Strategy for logging. + model_monitoring_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig): + Alert config for model monitoring. + predict_instance_schema_uri (str): + YAML schema file uri describing the format of + a single instance, which are given to format + this Endpoint's prediction (and explanation). If + not set, we will generate predict schema from + collected predict requests. 
+ sample_predict_instance (google.protobuf.struct_pb2.Value): + Sample Predict instance, same format as + [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], + this can be set as a replacement of + [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. + If not set, we will generate predict schema from collected + predict requests. + analysis_instance_schema_uri (str): + YAML schema file uri describing the format of a single + instance that you want Tensorflow Data Validation (TFDV) to + analyze. + + If this field is empty, all the feature data types are + inferred from + [predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], + meaning that TFDV will use the data in the exact format(data + type) as prediction request/response. If there are any data + type differences between predict instance and TFDV instance, + this field can be used to override the schema. For models + trained with Vertex AI, this field must be set as all the + fields in predict instance formatted as string. + bigquery_tables (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable]): + Output only. The created bigquery tables for + the job under customer project. Customer could + do their own query & analysis. There could be 4 + log tables in maximum: + 1. Training data logging predict + request/response 2. Serving data logging predict + request/response + log_ttl (google.protobuf.duration_pb2.Duration): + The TTL of BigQuery tables in user projects + which stores logs. A day is the basic unit of + the TTL and we take the ceil of TTL/86400(a + day). e.g. { second: 3600} indicates ttl = 1 + day. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your ModelDeploymentMonitoringJob. 
+ + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was updated most + recently. + next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this monitoring + pipeline will be scheduled to run for the next + round. + stats_anomalies_base_directory (google.cloud.aiplatform_v1.types.GcsDestination): + Stats anomalies base folder path. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + ModelDeploymentMonitoringJob. If set, this + ModelDeploymentMonitoringJob and all + sub-resources of this + ModelDeploymentMonitoringJob will be secured by + this key. + enable_monitoring_pipeline_logs (bool): + If true, the scheduled monitoring pipeline logs are sent to + Google Cloud Logging, including pipeline status and + anomalies detected. Please note the logs incur cost, which + are subject to `Cloud Logging + pricing `__. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. + """ + class MonitoringScheduleState(proto.Enum): + r"""The state to Specify the monitoring pipeline.""" + MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 + PENDING = 1 + OFFLINE = 2 + RUNNING = 3 + + class LatestMonitoringPipelineMetadata(proto.Message): + r"""All metadata of most recent monitoring pipelines. 
+ + Attributes: + run_time (google.protobuf.timestamp_pb2.Timestamp): + The time that most recent monitoring + pipelines that is related to this run. + status (google.rpc.status_pb2.Status): + The status of the most recent monitoring + pipeline. + """ + + run_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + endpoint = proto.Field( + proto.STRING, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=4, + enum=job_state.JobState, + ) + schedule_state = proto.Field( + proto.ENUM, + number=5, + enum=MonitoringScheduleState, + ) + latest_monitoring_pipeline_metadata = proto.Field( + proto.MESSAGE, + number=25, + message=LatestMonitoringPipelineMetadata, + ) + model_deployment_monitoring_objective_configs = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='ModelDeploymentMonitoringObjectiveConfig', + ) + model_deployment_monitoring_schedule_config = proto.Field( + proto.MESSAGE, + number=7, + message='ModelDeploymentMonitoringScheduleConfig', + ) + logging_sampling_strategy = proto.Field( + proto.MESSAGE, + number=8, + message=model_monitoring.SamplingStrategy, + ) + model_monitoring_alert_config = proto.Field( + proto.MESSAGE, + number=15, + message=model_monitoring.ModelMonitoringAlertConfig, + ) + predict_instance_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + sample_predict_instance = proto.Field( + proto.MESSAGE, + number=19, + message=struct_pb2.Value, + ) + analysis_instance_schema_uri = proto.Field( + proto.STRING, + number=16, + ) + bigquery_tables = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='ModelDeploymentMonitoringBigQueryTable', + ) + log_ttl = proto.Field( + proto.MESSAGE, + number=17, + message=duration_pb2.Duration, + ) + labels = proto.MapField( + proto.STRING, + 
proto.STRING, + number=11, + ) + create_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + next_schedule_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + stats_anomalies_base_directory = proto.Field( + proto.MESSAGE, + number=20, + message=io.GcsDestination, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=21, + message=gca_encryption_spec.EncryptionSpec, + ) + enable_monitoring_pipeline_logs = proto.Field( + proto.BOOL, + number=22, + ) + error = proto.Field( + proto.MESSAGE, + number=23, + message=status_pb2.Status, + ) + + +class ModelDeploymentMonitoringBigQueryTable(proto.Message): + r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery + table name as well as some information of the logs stored in + this table. + + Attributes: + log_source (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): + The source of log. + log_type (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogType): + The type of log. + bigquery_table_path (str): + The created BigQuery table to store logs. Customer could do + their own query & analysis. 
Format: + ``bq://.model_deployment_monitoring_._`` + """ + class LogSource(proto.Enum): + r"""Indicates where does the log come from.""" + LOG_SOURCE_UNSPECIFIED = 0 + TRAINING = 1 + SERVING = 2 + + class LogType(proto.Enum): + r"""Indicates what type of traffic does the log belong to.""" + LOG_TYPE_UNSPECIFIED = 0 + PREDICT = 1 + EXPLAIN = 2 + + log_source = proto.Field( + proto.ENUM, + number=1, + enum=LogSource, + ) + log_type = proto.Field( + proto.ENUM, + number=2, + enum=LogType, + ) + bigquery_table_path = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelDeploymentMonitoringObjectiveConfig(proto.Message): + r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of + deployed_model_id to ModelMonitoringObjectiveConfig. + + Attributes: + deployed_model_id (str): + The DeployedModel ID of the objective config. + objective_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig): + The objective config of for the + modelmonitoring job of this deployed model. + """ + + deployed_model_id = proto.Field( + proto.STRING, + number=1, + ) + objective_config = proto.Field( + proto.MESSAGE, + number=2, + message=model_monitoring.ModelMonitoringObjectiveConfig, + ) + + +class ModelDeploymentMonitoringScheduleConfig(proto.Message): + r"""The config for scheduling monitoring job. + + Attributes: + monitor_interval (google.protobuf.duration_pb2.Duration): + Required. The model monitoring job scheduling + interval. It will be rounded up to next full + hour. This defines how often the monitoring jobs + are triggered. + monitor_window (google.protobuf.duration_pb2.Duration): + The time window of the prediction data being included in + each prediction dataset. This window specifies how long the + data should be collected from historical model results for + each run. If not set, + [ModelDeploymentMonitoringScheduleConfig.monitor_interval][google.cloud.aiplatform.v1.ModelDeploymentMonitoringScheduleConfig.monitor_interval] + will be used. e.g. 
If currently the cutoff time is + 2022-01-08 14:30:00 and the monitor_window is set to be + 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 + 14:30:00 will be retrieved and aggregated to calculate the + monitoring statistics. + """ + + monitor_interval = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + monitor_window = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class ModelMonitoringStatsAnomalies(proto.Message): + r"""Statistics and anomalies generated by Model Monitoring. + + Attributes: + objective (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): + Model Monitoring Objective those stats and + anomalies belonging to. + deployed_model_id (str): + Deployed Model ID. + anomaly_count (int): + Number of anomalies within all stats. + feature_stats (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): + A list of historical Stats and Anomalies + generated for all Features. + """ + + class FeatureHistoricStatsAnomalies(proto.Message): + r"""Historical Stats (and Anomalies) for a specific Feature. + + Attributes: + feature_display_name (str): + Display Name of the Feature. + threshold (google.cloud.aiplatform_v1.types.ThresholdConfig): + Threshold for anomaly detection. + training_stats (google.cloud.aiplatform_v1.types.FeatureStatsAnomaly): + Stats calculated for the Training Dataset. + prediction_stats (Sequence[google.cloud.aiplatform_v1.types.FeatureStatsAnomaly]): + A list of historical stats generated by + different time window's Prediction Dataset. 
+ """ + + feature_display_name = proto.Field( + proto.STRING, + number=1, + ) + threshold = proto.Field( + proto.MESSAGE, + number=3, + message=model_monitoring.ThresholdConfig, + ) + training_stats = proto.Field( + proto.MESSAGE, + number=4, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + prediction_stats = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + objective = proto.Field( + proto.ENUM, + number=1, + enum='ModelDeploymentMonitoringObjectiveType', + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + anomaly_count = proto.Field( + proto.INT32, + number=3, + ) + feature_stats = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=FeatureHistoricStatsAnomalies, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py new file mode 100644 index 0000000000..cf622edca3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import explanation +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelEvaluation', + }, +) + + +class ModelEvaluation(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on all of the test data against annotations from the + test data. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluation. + display_name (str): + The display name of the ModelEvaluation. + metrics_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing the + [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] + of this ModelEvaluation. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + metrics (google.protobuf.struct_pb2.Value): + Evaluation metrics of the Model. The schema of the metrics + is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri] + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluation was created. + slice_dimensions (Sequence[str]): + All possible + [dimensions][ModelEvaluationSlice.slice.dimension] of + ModelEvaluationSlices. The dimensions can be used as the + filter of the + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] + request, in the form of ``slice.dimension = ``. + data_item_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing [EvaluatedDataItemView.data_item_payload][] and + [EvaluatedAnnotation.data_item_payload][]. The schema is + defined as an OpenAPI 3.0.2 `Schema + Object `__. + + This field is not populated if there are neither + EvaluatedDataItemViews nor EvaluatedAnnotations under this + ModelEvaluation. 
+ annotation_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing [EvaluatedDataItemView.predictions][], + [EvaluatedDataItemView.ground_truths][], + [EvaluatedAnnotation.predictions][], and + [EvaluatedAnnotation.ground_truths][]. The schema is defined + as an OpenAPI 3.0.2 `Schema + Object `__. + + This field is not populated if there are neither + EvaluatedDataItemViews nor EvaluatedAnnotations under this + ModelEvaluation. + model_explanation (google.cloud.aiplatform_v1.types.ModelExplanation): + Aggregated explanation metrics for the + Model's prediction output over the data this + ModelEvaluation uses. This field is populated + only if the Model is evaluated with + explanations, and only for AutoML tabular + Models. + explanation_specs (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): + Describes the values of + [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] + that are used for explaining the predicted values on the + evaluated data. + metadata (google.protobuf.struct_pb2.Value): + The metadata of the ModelEvaluation. For the ModelEvaluation + uploaded from Managed Pipeline, metadata contains a + structured value with keys of "pipeline_job_id", + "evaluation_dataset_type", "evaluation_dataset_path". + """ + + class ModelEvaluationExplanationSpec(proto.Message): + r""" + + Attributes: + explanation_type (str): + Explanation type. + + For AutoML Image Classification models, possible values are: + + - ``image-integrated-gradients`` + - ``image-xrai`` + explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): + Explanation spec details. 
+ """ + + explanation_type = proto.Field( + proto.STRING, + number=1, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=2, + message=explanation.ExplanationSpec, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=10, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + metrics = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + slice_dimensions = proto.RepeatedField( + proto.STRING, + number=5, + ) + data_item_schema_uri = proto.Field( + proto.STRING, + number=6, + ) + annotation_schema_uri = proto.Field( + proto.STRING, + number=7, + ) + model_explanation = proto.Field( + proto.MESSAGE, + number=8, + message=explanation.ModelExplanation, + ) + explanation_specs = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=ModelEvaluationExplanationSpec, + ) + metadata = proto.Field( + proto.MESSAGE, + number=11, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py new file mode 100644 index 0000000000..e9d7780e51 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelEvaluationSlice', + }, +) + + +class ModelEvaluationSlice(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on a slice of the test data against ground truth + annotations. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluationSlice. + slice_ (google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice): + Output only. The slice of the test data that + is used to evaluate the Model. + metrics_schema_uri (str): + Output only. Points to a YAML file stored on Google Cloud + Storage describing the + [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] + of this ModelEvaluationSlice. The schema is defined as an + OpenAPI 3.0.2 `Schema + Object `__. + metrics (google.protobuf.struct_pb2.Value): + Output only. Sliced evaluation metrics of the Model. The + schema of the metrics is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics_schema_uri] + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluationSlice was created. + """ + + class Slice(proto.Message): + r"""Definition of a slice. + + Attributes: + dimension (str): + Output only. The dimension of the slice. Well-known + dimensions are: + + - ``annotationSpec``: This slice is on the test data that + has either ground truth or prediction with + [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] + equals to + [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. + value (str): + Output only. The value of the dimension in + this slice. 
+ """ + + dimension = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + slice_ = proto.Field( + proto.MESSAGE, + number=2, + message=Slice, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + metrics = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py new file mode 100644 index 0000000000..9ccd026a77 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import io + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelMonitoringObjectiveConfig', + 'ModelMonitoringAlertConfig', + 'ThresholdConfig', + 'SamplingStrategy', + }, +) + + +class ModelMonitoringObjectiveConfig(proto.Message): + r"""Next ID: 8 + + Attributes: + training_dataset (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingDataset): + Training dataset for models. 
This field has + to be set only if + TrainingPredictionSkewDetectionConfig is + specified. + training_prediction_skew_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): + The config for skew between training data and + prediction data. + prediction_drift_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): + The config for drift of prediction data. + explanation_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): + The config for integrating with Vertex + Explainable AI. + """ + + class TrainingDataset(proto.Message): + r"""Training Dataset information. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dataset (str): + The resource name of the Dataset used to + train this Model. + + This field is a member of `oneof`_ ``data_source``. + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + The Google Cloud Storage uri of the unmanaged + Dataset used to train this Model. + + This field is a member of `oneof`_ ``data_source``. + bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): + The BigQuery table of the unmanaged Dataset + used to train this Model. + + This field is a member of `oneof`_ ``data_source``. + data_format (str): + Data format of the dataset, only applicable + if the input is from Google Cloud Storage. + The possible formats are: + + "tf-record" + The source file is a TFRecord file. + + "csv" + The source file is a CSV file. + "jsonl" + The source file is a JSONL file. + target_field (str): + The target field name the model is to + predict. 
This field will be excluded when doing + Predict and (or) Explain for the training data. + logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy): + Strategy to sample data from Training + Dataset. If not set, we process the whole + dataset. + """ + + dataset = proto.Field( + proto.STRING, + number=3, + oneof='data_source', + ) + gcs_source = proto.Field( + proto.MESSAGE, + number=4, + oneof='data_source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=5, + oneof='data_source', + message=io.BigQuerySource, + ) + data_format = proto.Field( + proto.STRING, + number=2, + ) + target_field = proto.Field( + proto.STRING, + number=6, + ) + logging_sampling_strategy = proto.Field( + proto.MESSAGE, + number=7, + message='SamplingStrategy', + ) + + class TrainingPredictionSkewDetectionConfig(proto.Message): + r"""The config for Training & Prediction data skew detection. It + specifies the training dataset sources and the skew detection + parameters. + + Attributes: + skew_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for skew, a value threshold must be configured + for that feature. The threshold here is against + feature distribution distance between the + training and prediction feature. + attribution_score_skew_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. The threshold here is against + attribution score distance between the training + and prediction feature. + default_skew_threshold (google.cloud.aiplatform_v1.types.ThresholdConfig): + Skew anomaly detection threshold used by all + features. When the per-feature thresholds are + not set, this field can be used to specify a + threshold for all features. 
+ """ + + skew_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='ThresholdConfig', + ) + attribution_score_skew_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message='ThresholdConfig', + ) + default_skew_threshold = proto.Field( + proto.MESSAGE, + number=6, + message='ThresholdConfig', + ) + + class PredictionDriftDetectionConfig(proto.Message): + r"""The config for Prediction data drift detection. + + Attributes: + drift_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for drift, a value threshold must be configured + for that feature. The threshold here is against + feature distribution distance between different + time windws. + attribution_score_drift_thresholds (Mapping[str, google.cloud.aiplatform_v1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. The threshold here is against + attribution score distance between different + time windows. + default_drift_threshold (google.cloud.aiplatform_v1.types.ThresholdConfig): + Drift anomaly detection threshold used by all + features. When the per-feature thresholds are + not set, this field can be used to specify a + threshold for all features. + """ + + drift_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='ThresholdConfig', + ) + attribution_score_drift_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message='ThresholdConfig', + ) + default_drift_threshold = proto.Field( + proto.MESSAGE, + number=5, + message='ThresholdConfig', + ) + + class ExplanationConfig(proto.Message): + r"""The config for integrating with Vertex Explainable AI. Only + applicable if the Model has explanation_spec populated. + + Attributes: + enable_feature_attributes (bool): + If want to analyze the Vertex Explainable AI + feature attribute scores or not. 
If set to true, + Vertex AI will log the feature attributions from + explain response and do the skew/drift detection + for them. + explanation_baseline (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): + Predictions generated by the + BatchPredictionJob using baseline dataset. + """ + + class ExplanationBaseline(proto.Message): + r"""Output from + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] + for Model Monitoring baseline dataset, which can be used to generate + baseline attribution scores. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs (google.cloud.aiplatform_v1.types.GcsDestination): + Cloud Storage location for BatchExplain + output. + + This field is a member of `oneof`_ ``destination``. + bigquery (google.cloud.aiplatform_v1.types.BigQueryDestination): + BigQuery location for BatchExplain output. + + This field is a member of `oneof`_ ``destination``. + prediction_format (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat): + The storage format of the predictions + generated BatchPrediction job. + """ + class PredictionFormat(proto.Enum): + r"""The storage format of the predictions generated + BatchPrediction job. 
+ """ + PREDICTION_FORMAT_UNSPECIFIED = 0 + JSONL = 2 + BIGQUERY = 3 + + gcs = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.GcsDestination, + ) + bigquery = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.BigQueryDestination, + ) + prediction_format = proto.Field( + proto.ENUM, + number=1, + enum='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat', + ) + + enable_feature_attributes = proto.Field( + proto.BOOL, + number=1, + ) + explanation_baseline = proto.Field( + proto.MESSAGE, + number=2, + message='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline', + ) + + training_dataset = proto.Field( + proto.MESSAGE, + number=1, + message=TrainingDataset, + ) + training_prediction_skew_detection_config = proto.Field( + proto.MESSAGE, + number=2, + message=TrainingPredictionSkewDetectionConfig, + ) + prediction_drift_detection_config = proto.Field( + proto.MESSAGE, + number=3, + message=PredictionDriftDetectionConfig, + ) + explanation_config = proto.Field( + proto.MESSAGE, + number=5, + message=ExplanationConfig, + ) + + +class ModelMonitoringAlertConfig(proto.Message): + r"""Next ID: 3 + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + email_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig.EmailAlertConfig): + Email alert config. + + This field is a member of `oneof`_ ``alert``. + enable_logging (bool): + Dump the anomalies to Cloud Logging. The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further sinked to Pub/Sub or any other services + supported by Cloud Logging. + """ + + class EmailAlertConfig(proto.Message): + r"""The config for email alert. + + Attributes: + user_emails (Sequence[str]): + The email addresses to send the alert. 
+ """ + + user_emails = proto.RepeatedField( + proto.STRING, + number=1, + ) + + email_alert_config = proto.Field( + proto.MESSAGE, + number=1, + oneof='alert', + message=EmailAlertConfig, + ) + enable_logging = proto.Field( + proto.BOOL, + number=2, + ) + + +class ThresholdConfig(proto.Message): + r"""The config for feature monitoring threshold. + Next ID: 3 + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (float): + Specify a threshold value that can trigger + the alert. If this threshold config is for + feature distribution distance: 1. For + categorical feature, the distribution distance + is calculated by L-inifinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + Each feature must have a non-zero threshold if + they need to be monitored. Otherwise no alert + will be triggered for that feature. + + This field is a member of `oneof`_ ``threshold``. + """ + + value = proto.Field( + proto.DOUBLE, + number=1, + oneof='threshold', + ) + + +class SamplingStrategy(proto.Message): + r"""Sampling Strategy for logging, can be for both training and + prediction dataset. + Next ID: 2 + + Attributes: + random_sample_config (google.cloud.aiplatform_v1.types.SamplingStrategy.RandomSampleConfig): + Random sample config. Will support more + sampling strategies later. + """ + + class RandomSampleConfig(proto.Message): + r"""Requests are randomly selected. 
+ + Attributes: + sample_rate (float): + Sample rate (0, 1] + """ + + sample_rate = proto.Field( + proto.DOUBLE, + number=1, + ) + + random_sample_config = proto.Field( + proto.MESSAGE, + number=1, + message=RandomSampleConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py new file mode 100644 index 0000000000..3f814aa0af --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py @@ -0,0 +1,815 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'UploadModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelResponse', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 'ListModelVersionsResponse', + 'UpdateModelRequest', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'MergeVersionAliasesRequest', + 'ExportModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelResponse', + 'ImportModelEvaluationRequest', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'GetModelEvaluationSliceRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + }, +) + + +class UploadModelRequest(proto.Message): + r"""Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + + Attributes: + parent (str): + Required. The resource name of the Location into which to + upload the Model. Format: + ``projects/{project}/locations/{location}`` + parent_model (str): + Optional. The resource name of the model into + which to upload the version. Only specify this + field when uploading a new version. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. + + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + parent_model = proto.Field( + proto.STRING, + number=4, + ) + model_id = proto.Field( + proto.STRING, + number=5, + ) + model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model.Model, + ) + + +class UploadModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UploadModelResponse(proto.Message): + r"""Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + Attributes: + model (str): + The name of the uploaded Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_version_id (str): + Output only. The version ID of the model that + is uploaded. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + model_version_id = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + + Attributes: + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, also + provide the version ID or version alias. Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. The "default" version alias is + created for the first version of the model, and can be moved + to other versions later on. 
There will be exactly one + default version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1.Model.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``model=1234`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] + of the previous + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1.types.Model]): + List of Models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class ListModelVersionsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions]. + + Attributes: + name (str): + Required. The name of the model to list + versions for. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelVersionsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelVersionsResponse.next_page_token] + of the previous [ModelService.ListModelversions][] call. + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. 
+ + Some examples: + + - ``labels.myKey="myValue"`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelVersionsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1.ModelService.ListModelVersions] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1.types.Model]): + List of Model versions in the requested page. + In the returned Model name field, version ID + instead of revision tag will be included. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListModelVersionsRequest.page_token][google.cloud.aiplatform.v1.ListModelVersionsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelRequest(proto.Message): + r"""Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + + Attributes: + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, refers to + a version specific update. + 2. model.name without the @ value, e.g. models/123, refers + to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a model + update. + 4. 
Supported model fields: display_name, description; + supported version-specific fields: version_description. + Labels are supported in both scenarios. Both the model + labels and the version labels are merged when a model is + returned. When updating labels, if the request is for + model-specific update, model label gets updated. + Otherwise, version labels get updated. + 5. A model name or model version name fields update mismatch + will cause a precondition error. + 6. One request cannot update both the model and the version + fields. You must update them separately. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. + + Attributes: + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeleteModelVersionRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1.ModelService.DeleteModelVersion]. + + Attributes: + name (str): + Required. The name of the model version to be deleted, with + a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class MergeVersionAliasesRequest(proto.Message): + r"""Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1.ModelService.MergeVersionAliases]. 
+ + Attributes: + name (str): + Required. The name of the model version to merge aliases, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix to + an alias means removing that alias from the version. ``-`` + is NOT counted in the 128 characters. Example: ``-golden`` + means removing the ``golden`` alias from the version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have the + exactly same order from this MergeVersionAliases API. 2) + Adding and deleting the same alias in the request is not + recommended, and the 2 operations will be cancelled out. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + + Attributes: + name (str): + Required. The resource name of the Model to + export. The resource name may contain version id + or version alias to specify the version, if no + version is specified, the default version will + be exported. + output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): + Required. The desired output location and + configuration. + """ + + class OutputConfig(proto.Message): + r"""Output configuration for the Model export. + + Attributes: + export_format_id (str): + The ID of the format in which the Model must be exported. + Each Model lists the [export formats it + supports][google.cloud.aiplatform.v1.Model.supported_export_formats]. + If no value is provided here, then the first from the list + of the Model's supported formats is used by default. 
+ artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location where the Model artifact is to be + written to. Under the directory given as the destination a + new one with name + "``model-export--``", + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format, will be created. Inside, the Model and any of its + supporting files will be written. This field should only be + set when the ``exportableContent`` field of the + [Model.supported_export_formats] object contains + ``ARTIFACT``. + image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination): + The Google Container Registry or Artifact Registry uri where + the Model container image will be copied to. This field + should only be set when the ``exportableContent`` field of + the [Model.supported_export_formats] object contains + ``IMAGE``. + """ + + export_format_id = proto.Field( + proto.STRING, + number=1, + ) + artifact_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.GcsDestination, + ) + image_destination = proto.Field( + proto.MESSAGE, + number=4, + message=io.ContainerRegistryDestination, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + output_config = proto.Field( + proto.MESSAGE, + number=2, + message=OutputConfig, + ) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo): + Output only. Information further describing + the output of this Model export. + """ + + class OutputInfo(proto.Message): + r"""Further describes the output of the ExportModel. Supplements + [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig]. 
+ + Attributes: + artifact_output_uri (str): + Output only. If the Model artifact is being + exported to Google Cloud Storage this is the + full path of the directory created, into which + the Model files are being written to. + image_output_uri (str): + Output only. If the Model image is being + exported to Google Container Registry or + Artifact Registry this is the full path of the + image created. + """ + + artifact_output_uri = proto.Field( + proto.STRING, + number=2, + ) + image_output_uri = proto.Field( + proto.STRING, + number=3, + ) + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + output_info = proto.Field( + proto.MESSAGE, + number=2, + message=OutputInfo, + ) + + +class ExportModelResponse(proto.Message): + r"""Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + """ + + +class ImportModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + + Attributes: + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_evaluation (google.cloud.aiplatform_v1.types.ModelEvaluation): + Required. Model evaluation resource to be + imported. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model_evaluation = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_evaluation.ModelEvaluation, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + + Attributes: + name (str): + Required. The name of the ModelEvaluation resource. 
Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Attributes: + parent (str): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Attributes: + model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + model_evaluations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_evaluation.ModelEvaluation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelEvaluationSliceRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. + + Attributes: + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationSlicesRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Attributes: + parent (str): + Required. The resource name of the ModelEvaluation to list + the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + filter (str): + The standard list filter. + + - ``slice.dimension`` - for =. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationSlicesResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Attributes: + model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation_slices = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_evaluation_slice.ModelEvaluationSlice, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py new file mode 100644 index 0000000000..f5e8aea43d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'GenericOperationMetadata', + 'DeleteOperationMetadata', + }, +) + + +class GenericOperationMetadata(proto.Message): + r"""Generic Metadata shared by all operations. + + Attributes: + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + E.g. single files that couldn't be read. + This field should never exceed 20 entries. + Status details field will contain standard GCP + error details. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + updated for the last time. If the operation has + finished (successfully or not), this is the + finish time. + """ + + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message='GenericOperationMetadata', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_failure_policy.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_failure_policy.py new file mode 100644 index 0000000000..ffe07f447b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_failure_policy.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PipelineFailurePolicy', + }, +) + + +class PipelineFailurePolicy(proto.Enum): + r"""Represents the failure policy of a pipeline. Currently, the default + of a pipeline is that the pipeline will continue to run until no + more tasks can be executed, also known as + PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to + PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new + tasks when a task has failed. Any scheduled tasks will continue to + completion. 
+ """ + PIPELINE_FAILURE_POLICY_UNSPECIFIED = 0 + PIPELINE_FAILURE_POLICY_FAIL_SLOW = 1 + PIPELINE_FAILURE_POLICY_FAIL_FAST = 2 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py new file mode 100644 index 0000000000..1f256b9bc4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -0,0 +1,574 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import pipeline_failure_policy +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import value as gca_value +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PipelineJob', + 'PipelineTemplateMetadata', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + }, +) + + +class PipelineJob(proto.Message): + r"""An instance of a machine learning PipelineJob. + + Attributes: + name (str): + Output only. The resource name of the + PipelineJob. + display_name (str): + The display name of the Pipeline. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline creation time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline end time. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this PipelineJob + was most recently updated. + pipeline_spec (google.protobuf.struct_pb2.Struct): + The spec of the pipeline. + state (google.cloud.aiplatform_v1.types.PipelineState): + Output only. The detailed state of the job. + job_detail (google.cloud.aiplatform_v1.types.PipelineJobDetail): + Output only. The details of pipeline run. Not + available in the list view. + error (google.rpc.status_pb2.Status): + Output only. 
The error that occurred during + pipeline execution. Only populated when the + pipeline's state is FAILED or CANCELLED. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize PipelineJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + runtime_config (google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig): + Runtime config of the pipeline. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + pipelineJob. If set, this PipelineJob and all of + its sub-resources will be secured by this key. + service_account (str): + The service account that the pipeline workload runs as. If + not specified, the Compute Engine default service account in + the project will be used. See + https://cloud.google.com/compute/docs/access/service-accounts#default_service_account + + Users starting the pipeline must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + network (str): + The full name of the Compute Engine + `network `__ + to which the Pipeline Job's workload should be peered. For + example, ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + Private services access must already be configured for the + network. Pipeline job will apply the network configuration + to the GCP resources being launched, if applied, such as + Vertex AI Training or Dataflow job. If left unspecified, the + workload is not peered with any network. 
+ template_uri (str): + A template uri from where the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec], + if empty, will be downloaded. + template_metadata (google.cloud.aiplatform_v1.types.PipelineTemplateMetadata): + Output only. Pipeline template metadata. Will fill up fields + if + [PipelineJob.template_uri][google.cloud.aiplatform.v1.PipelineJob.template_uri] + is from supported template registry. + """ + + class RuntimeConfig(proto.Message): + r"""The runtime config of a PipelineJob. + + Attributes: + parameters (Mapping[str, google.cloud.aiplatform_v1.types.Value]): + Deprecated. Use + [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1.PipelineJob.RuntimeConfig.parameter_values] + instead. The runtime parameters of the PipelineJob. The + parameters will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, + such as pipelines built using Kubeflow Pipelines SDK 1.8 or + lower. + gcs_output_directory (str): + Required. A path in a Cloud Storage bucket, which will be + treated as the root output directory of the pipeline. It is + used by the system to generate the paths of output + artifacts. The artifact paths are generated with a sub-path + pattern ``{job_id}/{task_id}/{output_key}`` under the + specified output directory. The service account specified in + this pipeline must have the ``storage.objects.get`` and + ``storage.objects.create`` permissions for this bucket. + parameter_values (Mapping[str, google.protobuf.struct_pb2.Value]): + The runtime parameters of the PipelineJob. The parameters + will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. 
This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as + pipelines built using Kubeflow Pipelines SDK 1.9 or higher + and the v2 DSL. + failure_policy (google.cloud.aiplatform_v1.types.PipelineFailurePolicy): + Represents the failure policy of a pipeline. Currently, the + default of a pipeline is that the pipeline will continue to + run until no more tasks can be executed, also known as + PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is + set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop + scheduling any new tasks when a task has failed. Any + scheduled tasks will continue to completion. + """ + + parameters = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=gca_value.Value, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + parameter_values = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + failure_policy = proto.Field( + proto.ENUM, + number=4, + enum=pipeline_failure_policy.PipelineFailurePolicy, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + pipeline_spec = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=pipeline_state.PipelineState, + ) + job_detail = proto.Field( + proto.MESSAGE, + number=9, + message='PipelineJobDetail', + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + 
proto.STRING, + number=11, + ) + runtime_config = proto.Field( + proto.MESSAGE, + number=12, + message=RuntimeConfig, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=16, + message=gca_encryption_spec.EncryptionSpec, + ) + service_account = proto.Field( + proto.STRING, + number=17, + ) + network = proto.Field( + proto.STRING, + number=18, + ) + template_uri = proto.Field( + proto.STRING, + number=19, + ) + template_metadata = proto.Field( + proto.MESSAGE, + number=20, + message='PipelineTemplateMetadata', + ) + + +class PipelineTemplateMetadata(proto.Message): + r"""Pipeline template metadata if + [PipelineJob.template_uri][google.cloud.aiplatform.v1.PipelineJob.template_uri] + is from supported template registry. Currently, the only supported + registry is Artifact Registry. + + Attributes: + version (str): + The version_name in artifact registry. + + Will always be presented in output if the + [PipelineJob.template_uri][google.cloud.aiplatform.v1.PipelineJob.template_uri] + is from supported template registry. + + Format is "sha256:abcdef123456...". + """ + + version = proto.Field( + proto.STRING, + number=3, + ) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + + Attributes: + pipeline_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. 
+ """ + + pipeline_context = proto.Field( + proto.MESSAGE, + number=1, + message=context.Context, + ) + pipeline_run_context = proto.Field( + proto.MESSAGE, + number=2, + message=context.Context, + ) + task_details = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='PipelineTaskDetail', + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. + pipeline_task_status (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.PipelineTaskStatus]): + Output only. A list of task status. This + field keeps a record of task status evolving + over time. + inputs (Mapping[str, google.cloud.aiplatform_v1.types.PipelineTaskDetail.ArtifactList]): + Output only. The runtime input artifacts of + the task. + outputs (Mapping[str, google.cloud.aiplatform_v1.types.PipelineTaskDetail.ArtifactList]): + Output only. 
The runtime output artifacts of + the task. + """ + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class PipelineTaskStatus(proto.Message): + r"""A single record of the task status. + + Attributes: + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Update time of this status. + state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): + Output only. The state of the task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + the state. May be set when the state is any of + the non-final state (PENDING/RUNNING/CANCELLING) + or FAILED state. If the state is FAILED, the + error here is final and not going to be retried. + If the state is a non-final state, the error + indicates a system-error being retried. + """ + + update_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=2, + enum='PipelineTaskDetail.State', + ) + error = proto.Field( + proto.MESSAGE, + number=3, + message=status_pb2.Status, + ) + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + Output only. A list of artifact metadata. 
+ """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + + task_id = proto.Field( + proto.INT64, + number=1, + ) + parent_task_id = proto.Field( + proto.INT64, + number=12, + ) + task_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + executor_detail = proto.Field( + proto.MESSAGE, + number=6, + message='PipelineTaskExecutorDetail', + ) + state = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + execution = proto.Field( + proto.MESSAGE, + number=8, + message=gca_execution.Execution, + ) + error = proto.Field( + proto.MESSAGE, + number=9, + message=status_pb2.Status, + ) + pipeline_task_status = proto.RepeatedField( + proto.MESSAGE, + number=13, + message=PipelineTaskStatus, + ) + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=10, + message=ArtifactList, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=11, + message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + container_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + + This field is a member of `oneof`_ ``details``. 
+ custom_job_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. + + This field is a member of `oneof`_ ``details``. + """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field( + proto.STRING, + number=1, + ) + pre_caching_check_job = proto.Field( + proto.STRING, + number=2, + ) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob]. 
+ """ + + job = proto.Field( + proto.STRING, + number=1, + ) + + container_detail = proto.Field( + proto.MESSAGE, + number=1, + oneof='details', + message=ContainerDetail, + ) + custom_job_detail = proto.Field( + proto.MESSAGE, + number=2, + oneof='details', + message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py new file mode 100644 index 0000000000..e4fa25decb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateTrainingPipelineRequest', + 'GetTrainingPipelineRequest', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'DeleteTrainingPipelineRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'GetPipelineJobRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'DeletePipelineJobRequest', + 'CancelPipelineJobRequest', + }, +) + + +class CreateTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): + Required. The TrainingPipeline to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + training_pipeline = proto.Field( + proto.MESSAGE, + number=2, + message=gca_training_pipeline.TrainingPipeline, + ) + + +class GetTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource. 
Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrainingPipelinesRequest(proto.Message): + r"""Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + + - ``NOT display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] + of the previous + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListTrainingPipelinesResponse(proto.Message): + r"""Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Attributes: + training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]): + List of TrainingPipelines in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + training_pipelines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_training_pipeline.TrainingPipeline, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource to be + deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline to cancel. 
+ Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + pipeline_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the PipelineJobs that match the filter expression. 
The + following fields are supported: + + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check + if pipeline's display_name contains *step* by doing + display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. + + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + call. + order_by (str): + A comma-separated list of fields to order by. 
The default
+            sort order is in ascending order. Use "desc" after a field
+            name for descending. You can have multiple order_by fields
+            provided, e.g. "create_time desc, end_time", "end_time,
+            start_time, update_time". For example, using "create_time
+            desc, end_time" will order results by create time in
+            descending order, and if there are multiple jobs having the
+            same create time, order them by the end time in ascending
+            order. If order_by is not specified, the results will be
+            ordered by create time in descending order. Supported
+            fields:
+
+            -  ``create_time``
+            -  ``update_time``
+            -  ``end_time``
+            -  ``start_time``
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+
+
+class ListPipelineJobsResponse(proto.Message):
+    r"""Response message for
+    [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
+
+    Attributes:
+        pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]):
+            List of PipelineJobs in the requested page.
+        next_page_token (str):
+            A token to retrieve the next page of results. Pass to
+            [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token]
+            to obtain that page.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    pipeline_jobs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_pipeline_job.PipelineJob,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class DeletePipelineJobRequest(proto.Message):
+    r"""Request message for
+    [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob].
+
+    Attributes:
+        name (str):
+            Required. The name of the PipelineJob resource to be
+            deleted.
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py new file mode 100644 index 0000000000..d45e3fc5f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PipelineState', + }, +) + + +class PipelineState(proto.Enum): + r"""Describes the state of a pipeline.""" + PIPELINE_STATE_UNSPECIFIED = 0 + PIPELINE_STATE_QUEUED = 1 + PIPELINE_STATE_PENDING = 2 + PIPELINE_STATE_RUNNING = 3 + PIPELINE_STATE_SUCCEEDED = 4 + PIPELINE_STATE_FAILED = 5 + PIPELINE_STATE_CANCELLING = 6 + PIPELINE_STATE_CANCELLED = 7 + PIPELINE_STATE_PAUSED = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py new file mode 100644 index 0000000000..8cd2364d1c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import explanation +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PredictRequest', + 'PredictResponse', + 'RawPredictRequest', + 'ExplainRequest', + 'ExplainResponse', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the prediction + call. A DeployedModel may have an upper limit on the number + of instances it supports per request, and when it is + exceeded the prediction call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + Attributes: + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + The schema of any single prediction may be specified via + Endpoint's DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this prediction. + model (str): + Output only. The resource name of the Model + which is deployed as the DeployedModel that this + prediction hits. + model_display_name (str): + Output only. The [display + name][google.cloud.aiplatform.v1.Model.display_name] of the + Model which is deployed as the DeployedModel that this + prediction hits. + """ + + predictions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + model_display_name = proto.Field( + proto.STRING, + number=4, + ) + + +class RawPredictRequest(proto.Message): + r"""Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. 
Supports HTTP headers and arbitrary + data payload. + + A [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for an + AutoML model, the + [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for a + custom-trained model, the behavior varies depending on the + model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1.Model]. This schema + applies when you deploy the ``Model`` as a ``DeployedModel`` + to an [Endpoint][google.cloud.aiplatform.v1.Endpoint] and + use the ``RawPredict`` method. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + http_body = proto.Field( + proto.MESSAGE, + number=2, + message=httpbody_pb2.HttpBody, + ) + + +class ExplainRequest(proto.Message): + r"""Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper limit on + the number of instances it supports per request, and when it + is exceeded the explanation call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. 
The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + explanation_spec_override (google.cloud.aiplatform_v1.types.ExplanationSpecOverride): + If specified, overrides the + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] + of the DeployedModel. Can be used for explaining prediction + results with different configurations, such as: + + - Explaining top-5 predictions results as opposed to top-1; + - Increasing path count or step count of the attribution + methods to reduce approximate errors; + - Using different baselines for explaining the prediction + results. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + explanation_spec_override = proto.Field( + proto.MESSAGE, + number=5, + message=explanation.ExplanationSpecOverride, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class ExplainResponse(proto.Message): + r"""Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. + + Attributes: + explanations (Sequence[google.cloud.aiplatform_v1.types.Explanation]): + The explanations of the Model's + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]. + + It has the same number of elements as + [instances][google.cloud.aiplatform.v1.ExplainRequest.instances] + to be explained. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this explanation. + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + Same as + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]. 
+ """ + + explanations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=explanation.Explanation, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + predictions = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py new file mode 100644 index 0000000000..730f857357 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'SpecialistPool', + }, +) + + +class SpecialistPool(proto.Message): + r"""SpecialistPool represents customers' own workforce to work on + their data labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for managing the + workers in this pool as well as customers' data labeling jobs + associated with this pool. Customers create specialist pool as + well as start data labeling jobs on Cloud, managers and workers + handle the jobs using CrowdCompute console. + + Attributes: + name (str): + Required. The resource name of the + SpecialistPool. + display_name (str): + Required. 
The user-defined name of the
+            SpecialistPool. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+            This field should be unique on project-level.
+        specialist_managers_count (int):
+            Output only. The number of managers in this
+            SpecialistPool.
+        specialist_manager_emails (Sequence[str]):
+            The email addresses of the managers in the
+            SpecialistPool.
+        pending_data_labeling_jobs (Sequence[str]):
+            Output only. The resource name of the pending
+            data labeling jobs.
+        specialist_worker_emails (Sequence[str]):
+            The email addresses of workers in the
+            SpecialistPool.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    specialist_managers_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    specialist_manager_emails = proto.RepeatedField(
+        proto.STRING,
+        number=4,
+    )
+    pending_data_labeling_jobs = proto.RepeatedField(
+        proto.STRING,
+        number=5,
+    )
+    specialist_worker_emails = proto.RepeatedField(
+        proto.STRING,
+        number=7,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
new file mode 100644
index 0000000000..f11aa211d8
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.cloud.aiplatform_v1.types import operation
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.protobuf import field_mask_pb2  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1",
    manifest={
        "CreateSpecialistPoolRequest",
        "CreateSpecialistPoolOperationMetadata",
        "GetSpecialistPoolRequest",
        "ListSpecialistPoolsRequest",
        "ListSpecialistPoolsResponse",
        "DeleteSpecialistPoolRequest",
        "UpdateSpecialistPoolRequest",
        "UpdateSpecialistPoolOperationMetadata",
    },
)


class CreateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        parent (str):
            Required. The parent Project name for the new SpecialistPool,
            in the form ``projects/{project}/locations/{location}``.
        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
            Required. The SpecialistPool to create.
    """

    parent = proto.Field(proto.STRING, number=1)
    specialist_pool = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gca_specialist_pool.SpecialistPool,
    )


class CreateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation information for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
            The operation generic information.
    """

    generic_metadata = proto.Field(
        proto.MESSAGE,
        number=1,
        message=operation.GenericOperationMetadata,
    )


class GetSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].

    Attributes:
        name (str):
            Required. The name of the SpecialistPool resource, in the form
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
    """

    name = proto.Field(proto.STRING, number=1)


class ListSpecialistPoolsRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        parent (str):
            Required. The name of the SpecialistPool's parent resource.
            Format: ``projects/{project}/locations/{location}``
        page_size (int):
            The standard list page size.
        page_token (str):
            The standard list page token. Typically obtained from
            [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token]
            of the previous
            [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]
            call. Returns the first page if empty.
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            Mask specifying which fields to read.
    """

    parent = proto.Field(proto.STRING, number=1)
    page_size = proto.Field(proto.INT32, number=2)
    page_token = proto.Field(proto.STRING, number=3)
    read_mask = proto.Field(
        proto.MESSAGE,
        number=4,
        message=field_mask_pb2.FieldMask,
    )


class ListSpecialistPoolsResponse(proto.Message):
    r"""Response message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]):
            A list of SpecialistPools that matches the
            specified filter in the request.
        next_page_token (str):
            The standard List next-page token.
    """

    @property
    def raw_page(self):
        # Pagers use this to treat the response itself as a page.
        return self

    specialist_pools = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gca_specialist_pool.SpecialistPool,
    )
    next_page_token = proto.Field(proto.STRING, number=2)


class DeleteSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].

    Attributes:
        name (str):
            Required. The resource name of the SpecialistPool to delete.
            Format:
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
        force (bool):
            If set to true, any specialist managers in
            this SpecialistPool will also be deleted.
            (Otherwise, the request will only work if the
            SpecialistPool has no specialist managers.)
    """

    name = proto.Field(proto.STRING, number=1)
    force = proto.Field(proto.BOOL, number=2)


class UpdateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
            Required. The SpecialistPool which replaces
            the resource on the server.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The update mask applies to the
            resource.
    """

    specialist_pool = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gca_specialist_pool.SpecialistPool,
    )
    update_mask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )


class UpdateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation metadata for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (str):
            Output only. The name of the SpecialistPool to which the
            specialists are being added. Format:
            ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
            The operation generic information.
    """

    specialist_pool = proto.Field(proto.STRING, number=1)
    generic_metadata = proto.Field(
        proto.MESSAGE,
        number=2,
        message=operation.GenericOperationMetadata,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
#
import proto  # type: ignore

from google.protobuf import duration_pb2  # type: ignore
from google.protobuf import struct_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore


__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1",
    manifest={
        "Study",
        "Trial",
        "StudySpec",
        "Measurement",
    },
)


class Study(proto.Message):
    r"""A message representing a Study.

    Attributes:
        name (str):
            Output only. The name of a study. The study's globally
            unique identifier. Format:
            ``projects/{project}/locations/{location}/studies/{study}``
        display_name (str):
            Required. Describes the Study; defaults to the
            empty string.
        study_spec (google.cloud.aiplatform_v1.types.StudySpec):
            Required. Configuration of the Study.
        state (google.cloud.aiplatform_v1.types.Study.State):
            Output only. The detailed state of a Study.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time at which the study was
            created.
        inactive_reason (str):
            Output only. A human readable reason why the
            Study is inactive. Empty while a study is ACTIVE
            or COMPLETED.
    """

    class State(proto.Enum):
        r"""Describes the Study state."""
        STATE_UNSPECIFIED = 0
        ACTIVE = 1
        INACTIVE = 2
        COMPLETED = 3

    name = proto.Field(proto.STRING, number=1)
    display_name = proto.Field(proto.STRING, number=2)
    study_spec = proto.Field(proto.MESSAGE, number=3, message="StudySpec")
    state = proto.Field(proto.ENUM, number=4, enum=State)
    create_time = proto.Field(
        proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,
    )
    inactive_reason = proto.Field(proto.STRING, number=6)


class Trial(proto.Message):
    r"""A message representing a Trial. A Trial contains a unique set
    of Parameters that has been or will be evaluated, along with the
    objective metrics obtained by running the Trial.

    Attributes:
        name (str):
            Output only. Resource name of the Trial
            assigned by the service.
        id (str):
            Output only. The identifier of the Trial
            assigned by the service.
        state (google.cloud.aiplatform_v1.types.Trial.State):
            Output only. The detailed state of the Trial.
        parameters (Sequence[google.cloud.aiplatform_v1.types.Trial.Parameter]):
            Output only. The parameters of the Trial.
        final_measurement (google.cloud.aiplatform_v1.types.Measurement):
            Output only. The final measurement containing
            the objective value.
        measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]):
            Output only. A list of measurements that are strictly
            lexicographically ordered by their induced tuples (steps,
            elapsed_duration). These are used for early stopping
            computations.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when the Trial was started.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when the Trial's status changed to
            ``SUCCEEDED`` or ``INFEASIBLE``.
        client_id (str):
            Output only. The identifier of the client that originally
            requested this Trial. Each client is identified by a unique
            client_id. When a client asks for a suggestion, Vertex AI
            Vizier will assign it a Trial. The client should evaluate
            the Trial, complete it, and report back to Vertex AI Vizier.
            If suggestion is asked again by same client_id before the
            Trial is completed, the same Trial will be returned.
            Multiple clients with different client_ids can ask for
            suggestions simultaneously, each of them will get their own
            Trial.
        infeasible_reason (str):
            Output only. A human readable string describing why the
            Trial is infeasible. Set only when the Trial state is
            ``INFEASIBLE``.
        custom_job (str):
            Output only. The CustomJob name linked to the
            Trial. Set for a HyperparameterTuningJob's Trial.
        web_access_uris (Mapping[str, str]):
            Output only. URIs for accessing interactive shells (one URI
            for each training node). Only available if this trial is
            part of a
            [HyperparameterTuningJob][google.cloud.aiplatform.v1.HyperparameterTuningJob]
            and the job's
            [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1.CustomJobSpec.enable_web_access]
            field is ``true``.

            The keys are names of each node used for the trial; for
            example, ``workerpool0-0`` for the primary node,
            ``workerpool1-0`` for the first node in the second worker
            pool, and ``workerpool1-1`` for the second node in the
            second worker pool.

            The values are the URIs for each node's interactive shell.
    """

    class State(proto.Enum):
        r"""Describes a Trial state."""
        STATE_UNSPECIFIED = 0
        REQUESTED = 1
        ACTIVE = 2
        STOPPING = 3
        SUCCEEDED = 4
        INFEASIBLE = 5

    class Parameter(proto.Message):
        r"""A message representing a parameter to be tuned.

        Attributes:
            parameter_id (str):
                Output only. The ID of the parameter. The parameter should
                be defined in [StudySpec's
                Parameters][google.cloud.aiplatform.v1.StudySpec.parameters].
            value (google.protobuf.struct_pb2.Value):
                Output only. The value of the parameter. ``number_value``
                is set if a parameter defined in StudySpec has type
                'INTEGER', 'DOUBLE' or 'DISCRETE'; ``string_value`` is
                set if the type is 'CATEGORICAL'.
        """

        parameter_id = proto.Field(proto.STRING, number=1)
        value = proto.Field(proto.MESSAGE, number=2, message=struct_pb2.Value)

    name = proto.Field(proto.STRING, number=1)
    id = proto.Field(proto.STRING, number=2)
    state = proto.Field(proto.ENUM, number=3, enum=State)
    parameters = proto.RepeatedField(proto.MESSAGE, number=4, message=Parameter)
    final_measurement = proto.Field(proto.MESSAGE, number=5, message="Measurement")
    measurements = proto.RepeatedField(proto.MESSAGE, number=6, message="Measurement")
    start_time = proto.Field(
        proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
    )
    end_time = proto.Field(
        proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,
    )
    client_id = proto.Field(proto.STRING, number=9)
    infeasible_reason = proto.Field(proto.STRING, number=10)
    custom_job = proto.Field(proto.STRING, number=11)
    web_access_uris = proto.MapField(proto.STRING, proto.STRING, number=12)


class StudySpec(proto.Message):
    r"""Represents specification of a Study.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        decay_curve_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.DecayCurveAutomatedStoppingSpec):
            The automated early stopping spec using decay
            curve rule.

            This field is a member of `oneof`_ ``automated_stopping_spec``.
        median_automated_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.MedianAutomatedStoppingSpec):
            The automated early stopping spec using
            median rule.

            This field is a member of `oneof`_ ``automated_stopping_spec``.
        convex_automated_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.ConvexAutomatedStoppingSpec):
            The automated early stopping spec using
            convex stopping rule.

            This field is a member of `oneof`_ ``automated_stopping_spec``.
        metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]):
            Required. Metric specs for the Study.
        parameters (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec]):
            Required. The set of parameters to tune.
        algorithm (google.cloud.aiplatform_v1.types.StudySpec.Algorithm):
            The search algorithm specified for the Study.
        observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise):
            The observation noise level of the study.
            Currently only supported by the Vertex AI Vizier
            service. Not supported by HyperparamterTuningJob
            or TrainingPipeline.
        measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType):
            Describes which measurement selection type
            will be used.
    """

    class Algorithm(proto.Enum):
        r"""The available search algorithms for the Study."""
        ALGORITHM_UNSPECIFIED = 0
        GRID_SEARCH = 2
        RANDOM_SEARCH = 3

    class ObservationNoise(proto.Enum):
        r"""Describes the noise level of the repeated observations.
        "Noisy" means that the repeated observations with the same Trial
        parameters may lead to different metric evaluations.
        """
        OBSERVATION_NOISE_UNSPECIFIED = 0
        LOW = 1
        HIGH = 2

    class MeasurementSelectionType(proto.Enum):
        r"""Indicates which measurement to use if/when the service
        automatically selects the final measurement from previously reported
        intermediate measurements. Choose this based on two considerations:
        A) Do you expect your measurements to monotonically improve? If so,
        choose LAST_MEASUREMENT. On the other hand, if you're in a situation
        where your system can "over-train" and you expect the performance to
        get better for a while but then start declining, choose
        BEST_MEASUREMENT. B) Are your measurements significantly noisy
        and/or irreproducible? If so, BEST_MEASUREMENT will tend to be
        over-optimistic, and it may be better to choose LAST_MEASUREMENT. If
        both or neither of (A) and (B) apply, it doesn't matter which
        selection type is chosen.
        """
        MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0
        LAST_MEASUREMENT = 1
        BEST_MEASUREMENT = 2

    class MetricSpec(proto.Message):
        r"""Represents a metric to optimize.

        Attributes:
            metric_id (str):
                Required. The ID of the metric. Must not
                contain whitespaces and must be unique amongst
                all MetricSpecs.
            goal (google.cloud.aiplatform_v1.types.StudySpec.MetricSpec.GoalType):
                Required. The optimization goal of the
                metric.
        """

        class GoalType(proto.Enum):
            r"""The available types of optimization goals."""
            GOAL_TYPE_UNSPECIFIED = 0
            MAXIMIZE = 1
            MINIMIZE = 2

        metric_id = proto.Field(proto.STRING, number=1)
        goal = proto.Field(
            proto.ENUM, number=2, enum="StudySpec.MetricSpec.GoalType",
        )

    class ParameterSpec(proto.Message):
        r"""Represents a single parameter to optimize.

        This message has `oneof`_ fields (mutually exclusive fields).
        For each oneof, at most one member field can be set at the same time.
        Setting any member of the oneof automatically clears all other
        members.

        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

        Attributes:
            double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec):
                The value spec for a 'DOUBLE' parameter.

                This field is a member of `oneof`_ ``parameter_value_spec``.
            integer_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.IntegerValueSpec):
                The value spec for an 'INTEGER' parameter.

                This field is a member of `oneof`_ ``parameter_value_spec``.
            categorical_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.CategoricalValueSpec):
                The value spec for a 'CATEGORICAL' parameter.

                This field is a member of `oneof`_ ``parameter_value_spec``.
            discrete_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DiscreteValueSpec):
                The value spec for a 'DISCRETE' parameter.

                This field is a member of `oneof`_ ``parameter_value_spec``.
            parameter_id (str):
                Required. The ID of the parameter. Must not
                contain whitespaces and must be unique amongst
                all ParameterSpecs.
            scale_type (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ScaleType):
                How the parameter should be scaled. Leave unset for
                ``CATEGORICAL`` parameters.
            conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]):
                A conditional parameter node is active if the parameter's
                value matches the conditional node's parent_value_condition.

                If two items in conditional_parameter_specs have the same
                name, they must have disjoint parent_value_condition.
        """

        class ScaleType(proto.Enum):
            r"""The type of scaling that should be applied to this parameter."""
            SCALE_TYPE_UNSPECIFIED = 0
            UNIT_LINEAR_SCALE = 1
            UNIT_LOG_SCALE = 2
            UNIT_REVERSE_LOG_SCALE = 3

        class DoubleValueSpec(proto.Message):
            r"""Value specification for a parameter in ``DOUBLE`` type.

            Attributes:
                min_value (float):
                    Required. Inclusive minimum value of the
                    parameter.
                max_value (float):
                    Required. Inclusive maximum value of the
                    parameter.
                default_value (float):
                    A default value for a ``DOUBLE`` parameter that is assumed
                    to be a relatively good starting point. Unset value signals
                    that there is no offered starting point.

                    Currently only supported by the Vertex AI Vizier service.
                    Not supported by HyperparamterTuningJob or TrainingPipeline.

                    This field is a member of `oneof`_ ``_default_value``.
            """

            min_value = proto.Field(proto.DOUBLE, number=1)
            max_value = proto.Field(proto.DOUBLE, number=2)
            default_value = proto.Field(proto.DOUBLE, number=4, optional=True)

        class IntegerValueSpec(proto.Message):
            r"""Value specification for a parameter in ``INTEGER`` type.

            Attributes:
                min_value (int):
                    Required. Inclusive minimum value of the
                    parameter.
                max_value (int):
                    Required. Inclusive maximum value of the
                    parameter.
                default_value (int):
                    A default value for an ``INTEGER`` parameter that is assumed
                    to be a relatively good starting point. Unset value signals
                    that there is no offered starting point.

                    Currently only supported by the Vertex AI Vizier service.
                    Not supported by HyperparamterTuningJob or TrainingPipeline.

                    This field is a member of `oneof`_ ``_default_value``.
            """

            min_value = proto.Field(proto.INT64, number=1)
            max_value = proto.Field(proto.INT64, number=2)
            default_value = proto.Field(proto.INT64, number=4, optional=True)

        class CategoricalValueSpec(proto.Message):
            r"""Value specification for a parameter in ``CATEGORICAL`` type.

            Attributes:
                values (Sequence[str]):
                    Required. The list of possible categories.
                default_value (str):
                    A default value for a ``CATEGORICAL`` parameter that is
                    assumed to be a relatively good starting point. Unset value
                    signals that there is no offered starting point.

                    Currently only supported by the Vizier service. Not
                    supported by HyperparamterTuningJob or TrainingPipeline.

                    This field is a member of `oneof`_ ``_default_value``.
            """

            values = proto.RepeatedField(proto.STRING, number=1)
            default_value = proto.Field(proto.STRING, number=3, optional=True)

        class DiscreteValueSpec(proto.Message):
            r"""Value specification for a parameter in ``DISCRETE`` type.

            Attributes:
                values (Sequence[float]):
                    Required. A list of possible values.
                    The list should be in increasing order and at
                    least 1e-10 apart. For instance, this parameter
                    might have possible settings of 1.5, 2.5, and
                    4.0. This list should not contain more than
                    1,000 values.
                default_value (float):
                    A default value for a ``DISCRETE`` parameter that is assumed
                    to be a relatively good starting point. Unset value signals
                    that there is no offered starting point. It automatically
                    rounds to the nearest feasible discrete point.

                    Currently only supported by the Vizier service. Not
                    supported by HyperparamterTuningJob or TrainingPipeline.

                    This field is a member of `oneof`_ ``_default_value``.
            """

            values = proto.RepeatedField(proto.DOUBLE, number=1)
            default_value = proto.Field(proto.DOUBLE, number=3, optional=True)

        class ConditionalParameterSpec(proto.Message):
            r"""Represents a parameter spec with condition from its parent
            parameter.

            This message has `oneof`_ fields (mutually exclusive fields).
            For each oneof, at most one member field can be set at the same time.
            Setting any member of the oneof automatically clears all other
            members.

            .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

            Attributes:
                parent_discrete_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition):
                    The spec for matching values from a parent parameter of
                    ``DISCRETE`` type.

                    This field is a member of `oneof`_ ``parent_value_condition``.
                parent_int_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition):
                    The spec for matching values from a parent parameter of
                    ``INTEGER`` type.

                    This field is a member of `oneof`_ ``parent_value_condition``.
                parent_categorical_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition):
                    The spec for matching values from a parent parameter of
                    ``CATEGORICAL`` type.

                    This field is a member of `oneof`_ ``parent_value_condition``.
                parameter_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec):
                    Required. The spec for a conditional
                    parameter.
            """

            class DiscreteValueCondition(proto.Message):
                r"""Represents the spec to match discrete values from parent
                parameter.

                Attributes:
                    values (Sequence[float]):
                        Required. Matches values of the parent parameter of
                        'DISCRETE' type. All values must exist in
                        ``discrete_value_spec`` of parent parameter.

                        The Epsilon of the value matching is 1e-10.
                """

                values = proto.RepeatedField(proto.DOUBLE, number=1)

            class IntValueCondition(proto.Message):
                r"""Represents the spec to match integer values from parent
                parameter.

                Attributes:
                    values (Sequence[int]):
                        Required. Matches values of the parent parameter of
                        'INTEGER' type. All values must lie in
                        ``integer_value_spec`` of parent parameter.
                """

                values = proto.RepeatedField(proto.INT64, number=1)

            class CategoricalValueCondition(proto.Message):
                r"""Represents the spec to match categorical values from parent
                parameter.

                Attributes:
                    values (Sequence[str]):
                        Required. Matches values of the parent parameter of
                        'CATEGORICAL' type. All values must exist in
                        ``categorical_value_spec`` of parent parameter.
                """

                values = proto.RepeatedField(proto.STRING, number=1)

            parent_discrete_values = proto.Field(
                proto.MESSAGE,
                number=2,
                oneof="parent_value_condition",
                message="StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition",
            )
            parent_int_values = proto.Field(
                proto.MESSAGE,
                number=3,
                oneof="parent_value_condition",
                message="StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition",
            )
            parent_categorical_values = proto.Field(
                proto.MESSAGE,
                number=4,
                oneof="parent_value_condition",
                message="StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition",
            )
            parameter_spec = proto.Field(
                proto.MESSAGE,
                number=1,
                message="StudySpec.ParameterSpec",
            )

        double_value_spec = proto.Field(
            proto.MESSAGE,
            number=2,
            oneof="parameter_value_spec",
            message="StudySpec.ParameterSpec.DoubleValueSpec",
        )
        integer_value_spec = proto.Field(
            proto.MESSAGE,
            number=3,
            oneof="parameter_value_spec",
            message="StudySpec.ParameterSpec.IntegerValueSpec",
        )
        categorical_value_spec = proto.Field(
            proto.MESSAGE,
            number=4,
            oneof="parameter_value_spec",
            message="StudySpec.ParameterSpec.CategoricalValueSpec",
        )
        discrete_value_spec = proto.Field(
            proto.MESSAGE,
            number=5,
            oneof="parameter_value_spec",
            message="StudySpec.ParameterSpec.DiscreteValueSpec",
        )
        parameter_id = proto.Field(proto.STRING, number=1)
        scale_type = proto.Field(
            proto.ENUM, number=6, enum="StudySpec.ParameterSpec.ScaleType",
        )
        conditional_parameter_specs = proto.RepeatedField(
            proto.MESSAGE,
            number=10,
            message="StudySpec.ParameterSpec.ConditionalParameterSpec",
        )

    class DecayCurveAutomatedStoppingSpec(proto.Message):
        r"""The decay curve automated stopping rule builds a Gaussian
        Process Regressor to predict the final objective value of a
        Trial based on the already completed Trials and the intermediate
        measurements of the current Trial. Early stopping is requested
        for the current Trial if there is very low probability to exceed
        the optimal value found so far.

        Attributes:
            use_elapsed_duration (bool):
                True if
                [Measurement.elapsed_duration][google.cloud.aiplatform.v1.Measurement.elapsed_duration]
                is used as the x-axis of each Trials Decay Curve. Otherwise,
                [Measurement.step_count][google.cloud.aiplatform.v1.Measurement.step_count]
                will be used as the x-axis.
        """

        use_elapsed_duration = proto.Field(proto.BOOL, number=1)

    class MedianAutomatedStoppingSpec(proto.Message):
        r"""The median automated stopping rule stops a pending Trial if the
        Trial's best objective_value is strictly below the median
        'performance' of all completed Trials reported up to the Trial's
        last measurement. Currently, 'performance' refers to the running
        average of the objective values reported by the Trial in each
        measurement.

        Attributes:
            use_elapsed_duration (bool):
                True if median automated stopping rule applies on
                [Measurement.elapsed_duration][google.cloud.aiplatform.v1.Measurement.elapsed_duration].
                It means that elapsed_duration field of latest measurement
                of current Trial is used to compute median objective value
                for each completed Trials.
        """

        use_elapsed_duration = proto.Field(proto.BOOL, number=1)

    class ConvexAutomatedStoppingSpec(proto.Message):
        r"""Configuration for ConvexAutomatedStoppingSpec. When there are enough
        completed trials (configured by min_measurement_count), for pending
        trials with enough measurements and steps, the policy first computes
        an overestimate of the objective value at max_num_steps according to
        the slope of the incomplete objective value curve. No prediction can
        be made if the curve is completely flat. If the overestimation is
        worse than the best objective value of the completed trials, this
        pending trial will be early-stopped, but a last measurement will be
        added to the pending trial with max_num_steps and predicted
        objective value from the autoregression model.

        Attributes:
            max_step_count (int):
                Steps used in predicting the final objective for early
                stopped trials. In general, it's set to be the same as the
                defined steps in training / tuning. If not defined, it will
                learn it from the completed trials. When use_steps is false,
                this field is set to the maximum elapsed seconds.
            min_step_count (int):
                Minimum number of steps for a trial to complete. Trials
                which do not have a measurement with step_count >
                min_step_count won't be considered for early stopping. It's
                ok to set it to 0, and a trial can be early stopped at any
                stage. By default, min_step_count is set to be one-tenth of
                the max_step_count. When use_elapsed_duration is true, this
                field is set to the minimum elapsed seconds.
            min_measurement_count (int):
                The minimal number of measurements in a Trial.
                Early-stopping checks will not trigger if less than
                min_measurement_count+1 completed trials or pending trials
                with less than min_measurement_count measurements. If not
                defined, the default value is 5.
            learning_rate_parameter_name (str):
                The hyper-parameter name used in the tuning job that stands
                for learning rate. Leave it blank if learning rate is not in
                a parameter in tuning. The learning_rate is used to estimate
                the objective value of the ongoing trial.
            use_elapsed_duration (bool):
                This bool determines whether or not the rule is applied
                based on elapsed_secs or steps. If
                use_elapsed_duration==false, the early stopping decision is
                made according to the predicted objective values according
                to the target steps. If use_elapsed_duration==true,
                elapsed_secs is used instead of steps. Also, in this case,
                the parameters max_num_steps and min_num_steps are
                overloaded to contain max_elapsed_seconds and
                min_elapsed_seconds.
        """

        max_step_count = proto.Field(proto.INT64, number=1)
        min_step_count = proto.Field(proto.INT64, number=2)
        min_measurement_count = proto.Field(proto.INT64, number=3)
        learning_rate_parameter_name = proto.Field(proto.STRING, number=4)
        use_elapsed_duration = proto.Field(proto.BOOL, number=5)

    decay_curve_stopping_spec = proto.Field(
        proto.MESSAGE,
        number=4,
        oneof="automated_stopping_spec",
        message=DecayCurveAutomatedStoppingSpec,
    )
    median_automated_stopping_spec = proto.Field(
        proto.MESSAGE,
        number=5,
        oneof="automated_stopping_spec",
        message=MedianAutomatedStoppingSpec,
    )
    convex_automated_stopping_spec = proto.Field(
        proto.MESSAGE,
        number=9,
        oneof="automated_stopping_spec",
        message=ConvexAutomatedStoppingSpec,
    )
    metrics = proto.RepeatedField(proto.MESSAGE, number=1, message=MetricSpec)
    parameters = proto.RepeatedField(proto.MESSAGE, number=2, message=ParameterSpec)
    algorithm = proto.Field(proto.ENUM, number=3, enum=Algorithm)
    observation_noise = proto.Field(proto.ENUM, number=6, enum=ObservationNoise)
    measurement_selection_type = proto.Field(
        proto.ENUM, number=7, enum=MeasurementSelectionType,
    )


class Measurement(proto.Message):
    r"""A message representing a Measurement of a Trial. A
    Measurement contains the Metrics obtained by executing a Trial
    using suggested hyperparameter values.

    Attributes:
        elapsed_duration (google.protobuf.duration_pb2.Duration):
            Output only. Time that the Trial has been
            running at the point of this Measurement.
        step_count (int):
            Output only. The number of steps the machine
            learning model has been trained for. Must be
            non-negative.
        metrics (Sequence[google.cloud.aiplatform_v1.types.Measurement.Metric]):
            Output only. A list of metrics obtained by
            evaluating the objective functions using
            suggested Parameter values.
    """

    class Metric(proto.Message):
        r"""A message representing a metric in the measurement.

        Attributes:
            metric_id (str):
                Output only. The ID of the Metric. The Metric should be
                defined in [StudySpec's
                Metrics][google.cloud.aiplatform.v1.StudySpec.metrics].
            value (float):
                Output only. The value for this metric.
        """

        metric_id = proto.Field(proto.STRING, number=1)
        value = proto.Field(proto.DOUBLE, number=2)

    elapsed_duration = proto.Field(
        proto.MESSAGE, number=1, message=duration_pb2.Duration,
    )
    step_count = proto.Field(proto.INT64, number=2)
    metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric)


__all__ = tuple(sorted(__protobuf__.manifest))
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Tensorboard', + }, +) + + +class Tensorboard(proto.Message): + r"""Tensorboard is a physical database that stores users' + training metrics. A default Tensorboard is provided in each + region of a GCP project. If needed users can also create extra + Tensorboards in their projects. + + Attributes: + name (str): + Output only. Name of the Tensorboard. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + display_name (str): + Required. User provided name of this + Tensorboard. + description (str): + Description of this Tensorboard. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Tensorboard. If set, this Tensorboard and all + sub-resources of this Tensorboard will be + secured by this key. + blob_storage_path_prefix (str): + Output only. Consumer project Cloud Storage + path prefix used to store blob data, which can + either be a bucket or directory. Does not end + with a '/'. + run_count (int): + Output only. The number of Runs stored in + this Tensorboard. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Tensorboard + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Tensorboard + was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Tensorboards. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Tensorboard (System labels + are excluded). 
+ + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=11, + message=gca_encryption_spec.EncryptionSpec, + ) + blob_storage_path_prefix = proto.Field( + proto.STRING, + number=10, + ) + run_count = proto.Field( + proto.INT32, + number=5, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py new file mode 100644 index 0000000000..e149ecd936 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import tensorboard_time_series +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'Scalar', + 'TensorboardTensor', + 'TensorboardBlobSequence', + 'TensorboardBlob', + }, +) + + +class TimeSeriesData(proto.Message): + r"""All the data stored in a TensorboardTimeSeries. + + Attributes: + tensorboard_time_series_id (str): + Required. The ID of the + TensorboardTimeSeries, which will become the + final component of the TensorboardTimeSeries' + resource name + value_type (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. The value type of this + time series. All the values in this time series + data must match this value type. + values (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesDataPoint]): + Required. Data points in this time series. + """ + + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=1, + ) + value_type = proto.Field( + proto.ENUM, + number=2, + enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, + ) + values = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='TimeSeriesDataPoint', + ) + + +class TimeSeriesDataPoint(proto.Message): + r"""A TensorboardTimeSeries data point. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + scalar (google.cloud.aiplatform_v1.types.Scalar): + A scalar value. + + This field is a member of `oneof`_ ``value``. 
+ tensor (google.cloud.aiplatform_v1.types.TensorboardTensor): + A tensor value. + + This field is a member of `oneof`_ ``value``. + blobs (google.cloud.aiplatform_v1.types.TensorboardBlobSequence): + A blob sequence value. + + This field is a member of `oneof`_ ``value``. + wall_time (google.protobuf.timestamp_pb2.Timestamp): + Wall clock timestamp when this data point is + generated by the end user. + step (int): + Step index of this data point within the run. + """ + + scalar = proto.Field( + proto.MESSAGE, + number=3, + oneof='value', + message='Scalar', + ) + tensor = proto.Field( + proto.MESSAGE, + number=4, + oneof='value', + message='TensorboardTensor', + ) + blobs = proto.Field( + proto.MESSAGE, + number=5, + oneof='value', + message='TensorboardBlobSequence', + ) + wall_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + step = proto.Field( + proto.INT64, + number=2, + ) + + +class Scalar(proto.Message): + r"""One point viewable on a scalar metric plot. + + Attributes: + value (float): + Value of the point at this step / timestamp. + """ + + value = proto.Field( + proto.DOUBLE, + number=1, + ) + + +class TensorboardTensor(proto.Message): + r"""One point viewable on a tensor metric plot. + + Attributes: + value (bytes): + Required. Serialized form of + https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto + version_number (int): + Optional. Version number of TensorProto used to serialize + [value][google.cloud.aiplatform.v1.TensorboardTensor.value]. + """ + + value = proto.Field( + proto.BYTES, + number=1, + ) + version_number = proto.Field( + proto.INT32, + number=2, + ) + + +class TensorboardBlobSequence(proto.Message): + r"""One point viewable on a blob metric plot, but mostly just a wrapper + message to work around repeated fields can't be used directly within + ``oneof`` fields. 
+ + Attributes: + values (Sequence[google.cloud.aiplatform_v1.types.TensorboardBlob]): + List of blobs contained within the sequence. + """ + + values = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='TensorboardBlob', + ) + + +class TensorboardBlob(proto.Message): + r"""One blob (e.g, image, graph) viewable on a blob metric plot. + + Attributes: + id (str): + Output only. A URI safe key uniquely + identifying a blob. Can be used to locate the + blob stored in the Cloud Storage bucket of the + consumer project. + data (bytes): + Optional. The bytes of the blob is not + present unless it's returned by the + ReadTensorboardBlobData endpoint. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + data = proto.Field( + proto.BYTES, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py new file mode 100644 index 0000000000..8d10c93d06 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'TensorboardExperiment', + }, +) + + +class TensorboardExperiment(proto.Message): + r"""A TensorboardExperiment is a group of TensorboardRuns, that + are typically the results of a training job run, in a + Tensorboard. + + Attributes: + name (str): + Output only. Name of the TensorboardExperiment. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + display_name (str): + User provided name of this + TensorboardExperiment. + description (str): + Description of this TensorboardExperiment. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + + - output only, its value is the + [metadata_schema's][metadata_schema_uri] title. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + source (str): + Immutable. Source of the + TensorboardExperiment. Example: a custom + training job. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + source = proto.Field( + proto.STRING, + number=8, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py new file mode 100644 index 0000000000..54f978d07d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'TensorboardRun', + }, +) + + +class TensorboardRun(proto.Message): + r"""TensorboardRun maps to a specific execution of a training job + with a given set of hyperparameter values, model definition, + dataset, etc + + Attributes: + name (str): + Output only. 
Name of the TensorboardRun. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + display_name (str): + Required. User provided name of this + TensorboardRun. This value must be unique among + all TensorboardRuns belonging to the same parent + TensorboardExperiment. + description (str): + Description of this TensorboardRun. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + TensorboardRuns. + + This field will be used to filter and visualize Runs in the + Tensorboard UI. For example, a Vertex AI training job can + set a label aiplatform.googleapis.com/training_job_id=xxxxx + to all the runs created within that job. An end user can set + a label experiment_id=xxxxx for all the runs produced in a + Jupyter notebook. These runs can be grouped by a label value + and visualized together in the Tensorboard UI. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one TensorboardRun (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py new file mode 100644 index 0000000000..1824eca563 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py @@ -0,0 +1,1224 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import operation +from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_data +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateTensorboardRequest', + 'GetTensorboardRequest', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'UpdateTensorboardRequest', + 'DeleteTensorboardRequest', + 'CreateTensorboardExperimentRequest', + 'GetTensorboardExperimentRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'UpdateTensorboardExperimentRequest', + 'DeleteTensorboardExperimentRequest', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'CreateTensorboardRunRequest', + 'GetTensorboardRunRequest', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'UpdateTensorboardRunRequest', + 'DeleteTensorboardRunRequest', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'CreateTensorboardTimeSeriesRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'UpdateTensorboardTimeSeriesRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'WriteTensorboardExperimentDataRequest', + 
'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'CreateTensorboardOperationMetadata', + 'UpdateTensorboardOperationMetadata', + }, +) + + +class CreateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): + Required. The Tensorboard to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard.Tensorboard, + ) + + +class GetTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the Tensorboards that match the filter + expression. + page_size (int): + The maximum number of Tensorboards to return. + The service may return fewer than this value. If + unspecified, at most 100 Tensorboards will be + returned. The maximum value is 100; values above + 100 will be coerced to 100. 
+ page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. + + Attributes: + tensorboards (Sequence[google.cloud.aiplatform_v1.types.Tensorboard]): + The Tensorboards mathching the request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboards = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard.Tensorboard, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. 
+ + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard.Tensorboard, + ) + + +class DeleteTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard to be deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. + + Attributes: + parent (str): + Required. The resource name of the Tensorboard to create the + TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): + The TensorboardExperiment to create. + tensorboard_experiment_id (str): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + tensorboard_experiment_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardExperimentsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardExperiments. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + filter (str): + Lists the TensorboardExperiments that match + the filter expression. + page_size (int): + The maximum number of TensorboardExperiments + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardExperiments will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardExperimentsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + tensorboard_experiments (Sequence[google.cloud.aiplatform_v1.types.TensorboardExperiment]): + The TensorboardExperiments mathching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardExperimentsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_experiments = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is used + to identify the TensorboardExperiment to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + +class DeleteTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchCreateTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest messages + must match this field. 
+ requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]): + Required. The request message specifying the + TensorboardRuns to create. A maximum of 1000 + TensorboardRuns can be created in a batch. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateTensorboardRunRequest', + ) + + +class BatchCreateTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun]): + The created TensorboardRuns. + """ + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_run.TensorboardRun, + ) + + +class CreateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): + Required. The TensorboardRun to create. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which will + become the final component of the Tensorboard run's resource + name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + tensorboard_run_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ReadTensorboardBlobDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + time_series (str): + Required. The resource name of the TensorboardTimeSeries to + list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + blob_ids (Sequence[str]): + IDs of the blobs to read. + """ + + time_series = proto.Field( + proto.STRING, + number=1, + ) + blob_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class ReadTensorboardBlobDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + blobs (Sequence[google.cloud.aiplatform_v1.types.TensorboardBlob]): + Blob messages containing blob bytes. + """ + + blobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TensorboardBlob, + ) + + +class ListTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. 
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the
+            TensorboardExperiment to list TensorboardRuns.
+            Format:
+            'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}'
+        filter (str):
+            Lists the TensorboardRuns that match the
+            filter expression.
+        page_size (int):
+            The maximum number of TensorboardRuns to
+            return. The service may return fewer than this
+            value. If unspecified, at most 50
+            TensorboardRuns will be returned. The maximum
+            value is 1000; values above 1000 will be coerced
+            to 1000.
+        page_token (str):
+            A page token, received from a previous
+            [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]
+            must match the call that provided the page token.
+        order_by (str):
+            Field to use to sort the list.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListTensorboardRunsResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns].
+
+    Attributes:
+        tensorboard_runs (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun]):
+            The TensorboardRuns matching the request.
+ next_page_token (str): + A token, which can be sent as + [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardRunsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_run.TensorboardRun, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the update. + The fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + + +class DeleteTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchCreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in the + CreateTensorboardTimeSeriesRequest messages must be sub + resources of this TensorboardExperiment. + requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]): + Required. The request message specifying the + TensorboardTimeSeries to create. A maximum of + 1000 TensorboardTimeSeries can be created in a + batch. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateTensorboardTimeSeriesRequest', + ) + + +class BatchCreateTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1.types.TensorboardTimeSeries]): + The created TensorboardTimeSeries. + """ + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class CreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. 
+ + Attributes: + parent (str): + Required. The resource name of the TensorboardRun to create + the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + tensorboard_time_series_id (str): + Optional. The user specified unique ID to use for the + TensorboardTimeSeries, which will become the final component + of the TensorboardTimeSeries's resource name. This value + should match "[a-z0-9][a-z0-9-]{0, 127}". + tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=3, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class GetTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the + TensorboardRun to list TensorboardTimeSeries. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + filter (str): + Lists the TensorboardTimeSeries that match + the filter expression. 
+        page_size (int):
+            The maximum number of TensorboardTimeSeries
+            to return. The service may return fewer than
+            this value. If unspecified, at most 50
+            TensorboardTimeSeries will be returned. The
+            maximum value is 1000; values above 1000 will be
+            coerced to 1000.
+        page_token (str):
+            A page token, received from a previous
+            [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]
+            must match the call that provided the page token.
+        order_by (str):
+            Field to use to sort the list.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListTensorboardTimeSeriesResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries].
+
+    Attributes:
+        tensorboard_time_series (Sequence[google.cloud.aiplatform_v1.types.TensorboardTimeSeries]):
+            The TensorboardTimeSeries matching the
+            request.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardTimeSeriesRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+ """ + + @property + def raw_page(self): + return self + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is used + to identify the TensorboardTimeSeries to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class DeleteTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard (str): + Required. The resource name of the Tensorboard containing + TensorboardTimeSeries to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. + The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + time_series (Sequence[str]): + Required. The resource names of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + tensorboard = proto.Field( + proto.STRING, + number=1, + ) + time_series = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class BatchReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]): + The returned time series data. + """ + + time_series_data = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesData, + ) + + +class ReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. 
+ + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + max_data_points (int): + The maximum number of TensorboardTimeSeries' + data to return. + This value should be a positive integer. + This value can be set to -1 to return all data. + filter (str): + Reads the TensorboardTimeSeries' data that + match the filter expression. + """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + max_data_points = proto.Field( + proto.INT32, + number=2, + ) + filter = proto.Field( + proto.STRING, + number=3, + ) + + +class ReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (google.cloud.aiplatform_v1.types.TimeSeriesData): + The returned time series data. + """ + + time_series_data = proto.Field( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardExperimentDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. + + Attributes: + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + write_run_data_requests (Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. 
+    """
+
+    tensorboard_experiment = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    write_run_data_requests = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message='WriteTensorboardRunDataRequest',
+    )
+
+
+class WriteTensorboardExperimentDataResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData].
+
+    """
+
+
+class WriteTensorboardRunDataRequest(proto.Message):
+    r"""Request message for
+    [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].
+
+    Attributes:
+        tensorboard_run (str):
+            Required. The resource name of the TensorboardRun to write
+            data to. Format:
+            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+        time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]):
+            Required. The TensorboardTimeSeries data to
+            write. Values within a time series are indexed
+            by their step value. Repeated writes to the same
+            step will overwrite the existing value for that
+            step.
+            The upper limit of data points per write request
+            is 5000.
+    """
+
+    tensorboard_run = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    time_series_data = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message=tensorboard_data.TimeSeriesData,
+    )
+
+
+class WriteTensorboardRunDataResponse(proto.Message):
+    r"""Response message for
+    [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].
+
+    """
+
+
+class ExportTensorboardTimeSeriesDataRequest(proto.Message):
+    r"""Request message for
+    [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData].
+
+    Attributes:
+        tensorboard_time_series (str):
+            Required. The resource name of the TensorboardTimeSeries to
+            export data from.
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + filter (str): + Exports the TensorboardTimeSeries' data that + match the filter expression. + page_size (int): + The maximum number of data points to return per page. The + default page_size will be 1000. Values must be between 1 and + 10000. Values above 10000 will be coerced to 10000. + page_token (str): + A page token, received from a previous + [TensorboardService.ExportTensorboardTimeSeries][] call. + Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ExportTensorboardTimeSeries][] must + match the call that provided the page token. + order_by (str): + Field to use to sort the + TensorboardTimeSeries' data. By default, + TensorboardTimeSeries' data will be returned in + a pseudo random order. + """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + + +class ExportTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + time_series_data_points (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesDataPoint]): + The returned time series data points. + next_page_token (str): + A token, which can be sent as + [ExportTensorboardTimeSeriesRequest.page_token][] to + retrieve the next page. If this field is omitted, there are + no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + time_series_data_points = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesDataPoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform create Tensorboard. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform update Tensorboard. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py new file mode 100644 index 0000000000..eb4e087ca7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'TensorboardTimeSeries',
+    },
+)
+
+
+class TensorboardTimeSeries(proto.Message):
+    r"""TensorboardTimeSeries maps to time series produced in
+    training runs
+
+    Attributes:
+        name (str):
+            Output only. Name of the
+            TensorboardTimeSeries.
+        display_name (str):
+            Required. User provided name of this
+            TensorboardTimeSeries. This value should be
+            unique among all TensorboardTimeSeries resources
+            belonging to the same TensorboardRun resource
+            (parent resource).
+        description (str):
+            Description of this TensorboardTimeSeries.
+        value_type (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.ValueType):
+            Required. Immutable. Type of
+            TensorboardTimeSeries value.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            TensorboardTimeSeries was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            TensorboardTimeSeries was last updated.
+        etag (str):
+            Used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        plugin_name (str):
+            Immutable. Name of the plugin this time
+            series pertains to. Such as Scalar, Tensor, Blob
+        plugin_data (bytes):
+            Data of the current plugin, with the size
+            limited to 65KB.
+        metadata (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.Metadata):
+            Output only. Scalar, Tensor, or Blob metadata
+            for this TensorboardTimeSeries.
+    """
+    class ValueType(proto.Enum):
+        r"""An enum representing the value type of a
+        TensorboardTimeSeries.
+        """
+        VALUE_TYPE_UNSPECIFIED = 0
+        SCALAR = 1
+        TENSOR = 2
+        BLOB_SEQUENCE = 3
+
+    class Metadata(proto.Message):
+        r"""Describes metadata for a TensorboardTimeSeries.
+ + Attributes: + max_step (int): + Output only. Max step index of all data + points within a TensorboardTimeSeries. + max_wall_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Max wall clock timestamp of all + data points within a TensorboardTimeSeries. + max_blob_sequence_length (int): + Output only. The largest blob sequence length (number of + blobs) of all data points in this time series, if its + ValueType is BLOB_SEQUENCE. + """ + + max_step = proto.Field( + proto.INT64, + number=1, + ) + max_wall_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + max_blob_sequence_length = proto.Field( + proto.INT64, + number=3, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + value_type = proto.Field( + proto.ENUM, + number=4, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + plugin_name = proto.Field( + proto.STRING, + number=8, + ) + plugin_data = proto.Field( + proto.BYTES, + number=9, + ) + metadata = proto.Field( + proto.MESSAGE, + number=10, + message=Metadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py new file mode 100644 index 0000000000..9bf0c93122 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import pipeline_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'TrainingPipeline', + 'InputDataConfig', + 'FractionSplit', + 'FilterSplit', + 'PredefinedSplit', + 'TimestampSplit', + 'StratifiedSplit', + }, +) + + +class TrainingPipeline(proto.Message): + r"""The TrainingPipeline orchestrates tasks associated with training a + Model. It always executes the training task, and optionally may also + export data from Vertex AI's Dataset which becomes the training + input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + Attributes: + name (str): + Output only. Resource name of the + TrainingPipeline. + display_name (str): + Required. The user-defined name of this + TrainingPipeline. + input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): + Specifies Vertex AI owned input data that may be used for + training the Model. 
The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + should make clear whether this config is used and if there + are any special requirements on how it should be filled. If + nothing about this config is mentioned in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], + then it should be assumed that the TrainingPipeline does not + depend on this configuration. + training_task_definition (str): + Required. A Google Cloud Storage path to the + YAML file that defines the training task which + is responsible for producing the model artifact, + and may also include additional auxiliary work. + The definition files that can be used here are + found in + gs://google-cloud-aiplatform/schema/trainingjob/definition/. + Note: The URI given on output will be immutable + and probably different, including the URI + scheme, than the one given on input. The output + URI will point to a location where the user only + has a read access. + training_task_inputs (google.protobuf.struct_pb2.Value): + Required. The training task's parameter(s), as specified in + the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s + ``inputs``. + training_task_metadata (google.protobuf.struct_pb2.Value): + Output only. The metadata information as specified in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s + ``metadata``. This metadata is an auxiliary runtime and + final information about the training task. While the + pipeline is running this information is populated only at a + best effort basis. Only present if the pipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + contains ``metadata`` object. 
+ model_to_upload (google.cloud.aiplatform_v1.types.Model): + Describes the Model that may be uploaded (via + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]) + by this TrainingPipeline. The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + should make clear whether this Model description should be + populated, and if there are any special requirements + regarding how it should be filled. If nothing is mentioned + in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], + then it should be assumed that this field should not be + filled and the training task either uploads the Model + without a need of this information, or that training task + does not support uploading a Model as part of the pipeline. + When the Pipeline's state becomes + ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been + uploaded into Vertex AI, then the model_to_upload's resource + [name][google.cloud.aiplatform.v1.Model.name] is populated. + The Model is always uploaded into the Project and Location + in which this pipeline is. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. + + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. + parent_model (str): + Optional. When specify this field, the ``model_to_upload`` + will not be uploaded as a new model, instead, it will become + a new version of this ``parent_model``. + state (google.cloud.aiplatform_v1.types.PipelineState): + Output only. The detailed state of the + pipeline. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the pipeline's state is + ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Time when the TrainingPipeline + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline for the first + time entered the ``PIPELINE_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline entered any of + the following states: ``PIPELINE_STATE_SUCCEEDED``, + ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline + was most recently updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize TrainingPipelines. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a TrainingPipeline. + If set, this TrainingPipeline will be secured by this key. + + Note: Model trained by this TrainingPipeline is also secured + by this key if + [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.model_to_upload] + is not set separately. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + input_data_config = proto.Field( + proto.MESSAGE, + number=3, + message='InputDataConfig', + ) + training_task_definition = proto.Field( + proto.STRING, + number=4, + ) + training_task_inputs = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + training_task_metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + model_to_upload = proto.Field( + proto.MESSAGE, + number=7, + message=model.Model, + ) + model_id = proto.Field( + proto.STRING, + number=22, + ) + parent_model = proto.Field( + proto.STRING, + number=21, + ) + state = proto.Field( + proto.ENUM, + number=9, + enum=pipeline_state.PipelineState, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=18, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class InputDataConfig(proto.Message): + r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + fraction_split (google.cloud.aiplatform_v1.types.FractionSplit): + Split based on fractions defining the size of + each set. + + This field is a member of `oneof`_ ``split``. + filter_split (google.cloud.aiplatform_v1.types.FilterSplit): + Split based on the provided filters for each + set. + + This field is a member of `oneof`_ ``split``. + predefined_split (google.cloud.aiplatform_v1.types.PredefinedSplit): + Supported only for tabular Datasets. + Split based on a predefined key. + + This field is a member of `oneof`_ ``split``. + timestamp_split (google.cloud.aiplatform_v1.types.TimestampSplit): + Supported only for tabular Datasets. + Split based on the timestamp of the input data + pieces. + + This field is a member of `oneof`_ ``split``. + stratified_split (google.cloud.aiplatform_v1.types.StratifiedSplit): + Supported only for tabular Datasets. + Split based on the distribution of the specified + column. + + This field is a member of `oneof`_ ``split``. + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location where the training data is to be + written to. In the given directory a new directory is + created with name: + ``dataset---`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All training input data is written into that + directory. + + The Vertex AI environment variables representing Cloud + Storage data URIs are represented in the Cloud Storage + wildcard format to support sharded data. e.g.: + "gs://.../training-*.jsonl" + + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data + + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + + - AIP_TEST_DATA_URI = + "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". 
+ + This field is a member of `oneof`_ ``destination``. + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Only applicable to custom training with tabular Dataset with + BigQuery source. + + The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset___`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data is written into that dataset. In the + dataset three tables are created, ``training``, + ``validation`` and ``test``. + + - AIP_DATA_FORMAT = "bigquery". + + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset\_\ **\ .training" + + - AIP_VALIDATION_DATA_URI = + "bigquery_destination.dataset\_\ **\ .validation" + + - AIP_TEST_DATA_URI = + "bigquery_destination.dataset\_\ **\ .test". + + This field is a member of `oneof`_ ``destination``. + dataset_id (str): + Required. The ID of the Dataset in the same Project and + Location which data will be used to train the Model. The + Dataset must use schema compatible with Model being trained, + and what is compatible should be described in the used + TrainingPipeline's [training_task_definition] + [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]. + For tabular Datasets, all their data is exported to + training, to pick and choose from. + annotations_filter (str): + Applicable only to Datasets that have DataItems and + Annotations. + + A filter on Annotations of the Dataset. Only Annotations + that both match this filter and belong to DataItems not + ignored by the split method are used in respectively + training, validation or test role, depending on the role of + the DataItem they are on (for the auto-assigned that role is + decided by Vertex AI). 
A filter with same syntax as the one + used in + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] + may be used, but note here it filters across all Annotations + of the Dataset, and not just within a single DataItem. + annotation_schema_uri (str): + Applicable only to custom training with Datasets that have + DataItems and Annotations. + + Cloud Storage URI that points to a YAML file describing the + annotation schema. The schema is defined as an OpenAPI 3.0.2 + `Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/ , + note that the chosen schema must be consistent with + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + of the Dataset specified by + [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id]. + + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. + + When used in conjunction with + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter], + the Annotations used for training are filtered by both + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter] + and + [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]. 
+ """ + + fraction_split = proto.Field( + proto.MESSAGE, + number=2, + oneof='split', + message='FractionSplit', + ) + filter_split = proto.Field( + proto.MESSAGE, + number=3, + oneof='split', + message='FilterSplit', + ) + predefined_split = proto.Field( + proto.MESSAGE, + number=4, + oneof='split', + message='PredefinedSplit', + ) + timestamp_split = proto.Field( + proto.MESSAGE, + number=5, + oneof='split', + message='TimestampSplit', + ) + stratified_split = proto.Field( + proto.MESSAGE, + number=12, + oneof='split', + message='StratifiedSplit', + ) + gcs_destination = proto.Field( + proto.MESSAGE, + number=8, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=10, + oneof='destination', + message=io.BigQueryDestination, + ) + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + annotations_filter = proto.Field( + proto.STRING, + number=6, + ) + annotation_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + + +class FractionSplit(proto.Message): + r"""Assigns the input data to training, validation, and test sets as per + the given fractions. Any of ``training_fraction``, + ``validation_fraction`` and ``test_fraction`` may optionally be + provided, they must sum to up to 1. If the provided ones sum to less + than 1, the remainder is assigned to sets as decided by Vertex AI. + If none of the fractions are set, by default roughly 80% of data is + used for training, 10% for validation, and 10% for test. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. 
+ """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + + +class FilterSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + + Supported only for unstructured Datasets. + + Attributes: + training_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to train the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + validation_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to validate the Model. A + filter with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + test_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to test the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. 
If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + """ + + training_filter = proto.Field( + proto.STRING, + number=1, + ) + validation_filter = proto.Field( + proto.STRING, + number=2, + ) + test_filter = proto.Field( + proto.STRING, + number=3, + ) + + +class PredefinedSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the value of a provided key. + + Supported only for tabular Datasets. + + Attributes: + key (str): + Required. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + """ + + key = proto.Field( + proto.STRING, + number=1, + ) + + +class TimestampSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the Dataset's data + columns. The values of the key (the values in the column) + must be in RFC 3339 ``date-time`` format, where + ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). 
If + for a piece of data the key is not present or has an invalid + value, that piece is ignored by the pipeline. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +class StratifiedSplit(proto.Message): + r"""Assigns input data to the training, validation, and test sets so + that the distribution of values found in the categorical column (as + specified by the ``key`` field) is mirrored within each split. The + fraction values determine the relative sizes of the splits. + + For example, if the specified column has three values, with 50% of + the rows having value "A", 25% value "B", and 25% value "C", and the + split fractions are specified as 80/10/10, then the training set + will constitute 80% of the training data, with about 50% of the + training set rows having the value "A" for the specified column, + about 25% having the value "B", and about 25% having the value "C". + + Only the top 500 occurring values are used; any values not in the + top 500 values are randomly assigned to a split. If less than three + rows contain a specific value, those rows are randomly assigned. + + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the + Dataset's data columns. The key provided must be + for a categorical column. 
+ """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py new file mode 100644 index 0000000000..591bf5dedc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + }, +) + + +class BoolArray(proto.Message): + r"""A list of boolean values. + + Attributes: + values (Sequence[bool]): + A list of bool values. + """ + + values = proto.RepeatedField( + proto.BOOL, + number=1, + ) + + +class DoubleArray(proto.Message): + r"""A list of double values. + + Attributes: + values (Sequence[float]): + A list of bool values. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + +class Int64Array(proto.Message): + r"""A list of int64 values. + + Attributes: + values (Sequence[int]): + A list of int64 values. 
+ """ + + values = proto.RepeatedField( + proto.INT64, + number=1, + ) + + +class StringArray(proto.Message): + r"""A list of string values. + + Attributes: + values (Sequence[str]): + A list of string values. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py new file mode 100644 index 0000000000..6a567ce7f5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import model + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'UnmanagedContainerModel', + }, +) + + +class UnmanagedContainerModel(proto.Message): + r"""Contains model information necessary to perform batch + prediction without requiring a full model import. + + Attributes: + artifact_uri (str): + The path to the directory containing the + Model artifact and any of its supporting files. 
+ predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): + Contains the schemata used in Model's + predictions and explanations + container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec): + Input only. The specification of the + container that is to be used when deploying this + Model. + """ + + artifact_uri = proto.Field( + proto.STRING, + number=1, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=2, + message=model.PredictSchemata, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=3, + message=model.ModelContainerSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py new file mode 100644 index 0000000000..cd1b51cc84 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'UserActionReference', + }, +) + + +class UserActionReference(proto.Message): + r"""References an API call. It contains more information about + long running operation and Jobs that are triggered by the API + call. + + This message has `oneof`_ fields (mutually exclusive fields). 
+ For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + operation (str): + For API calls that return a long running + operation. Resource name of the long running + operation. Format: + 'projects/{project}/locations/{location}/operations/{operation}' + + This field is a member of `oneof`_ ``reference``. + data_labeling_job (str): + For API calls that start a LabelingJob. Resource name of the + LabelingJob. Format: + 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' + + This field is a member of `oneof`_ ``reference``. + method (str): + The method name of the API RPC call. For + example, + "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". + """ + + operation = proto.Field( + proto.STRING, + number=1, + oneof='reference', + ) + data_labeling_job = proto.Field( + proto.STRING, + number=2, + oneof='reference', + ) + method = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py new file mode 100644 index 0000000000..3fe93dfe52 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Value', + }, +) + + +class Value(proto.Message): + r"""Value is the value of the field. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + int_value (int): + An integer value. + + This field is a member of `oneof`_ ``value``. + double_value (float): + A double value. + + This field is a member of `oneof`_ ``value``. + string_value (str): + A string value. + + This field is a member of `oneof`_ ``value``. + """ + + int_value = proto.Field( + proto.INT64, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=3, + oneof='value', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py new file mode 100644 index 0000000000..377201810f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py @@ -0,0 +1,589 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import operation +from google.cloud.aiplatform_v1.types import study as gca_study +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'GetStudyRequest', + 'CreateStudyRequest', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'DeleteStudyRequest', + 'LookupStudyRequest', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', + 'SuggestTrialsMetadata', + 'CreateTrialRequest', + 'GetTrialRequest', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'AddTrialMeasurementRequest', + 'CompleteTrialRequest', + 'DeleteTrialRequest', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CheckTrialEarlyStoppingStateMetatdata', + 'StopTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + }, +) + + +class GetStudyRequest(proto.Message): + r"""Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. + + Attributes: + name (str): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateStudyRequest(proto.Message): + r"""Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Study in. 
Format: + ``projects/{project}/locations/{location}`` + study (google.cloud.aiplatform_v1.types.Study): + Required. The Study configuration used to + create the Study. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + study = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Study, + ) + + +class ListStudiesRequest(proto.Message): + r"""Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + page_token (str): + Optional. A page token to request the next + page of results. If unspecified, there are no + subsequent pages. + page_size (int): + Optional. The maximum number of studies to + return per "page" of results. If unspecified, + service will pick an appropriate default. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + + +class ListStudiesResponse(proto.Message): + r"""Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. + + Attributes: + studies (Sequence[google.cloud.aiplatform_v1.types.Study]): + The studies associated with the project. + next_page_token (str): + Passes this token as the ``page_token`` field of the request + for a subsequent call. If this field is omitted, there are + no subsequent pages. + """ + + @property + def raw_page(self): + return self + + studies = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Study, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteStudyRequest(proto.Message): + r"""Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. + + Attributes: + name (str): + Required. 
The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class LookupStudyRequest(proto.Message): + r"""Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. + + Attributes: + parent (str): + Required. The resource name of the Location to get the Study + from. Format: ``projects/{project}/locations/{location}`` + display_name (str): + Required. The user-defined display name of + the Study + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + + +class SuggestTrialsRequest(proto.Message): + r"""Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + + Attributes: + parent (str): + Required. The project and location that the Study belongs + to. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + suggestion_count (int): + Required. The number of suggestions + requested. + client_id (str): + Required. The identifier of the client that is requesting + the suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + suggestion_count = proto.Field( + proto.INT32, + number=2, + ) + client_id = proto.Field( + proto.STRING, + number=3, + ) + + +class SuggestTrialsResponse(proto.Message): + r"""Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. + + Attributes: + trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): + A list of Trials. + study_state (google.cloud.aiplatform_v1.types.Study.State): + The state of the Study. 
+ start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which operation processing + completed. + """ + + trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + study_state = proto.Field( + proto.ENUM, + number=2, + enum=gca_study.Study.State, + ) + start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class SuggestTrialsMetadata(proto.Message): + r"""Details of operations that perform Trials suggestion. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for suggesting Trials. + client_id (str): + The identifier of the client that is requesting the + suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + client_id = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTrialRequest(proto.Message): + r"""Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. + + Attributes: + parent (str): + Required. The resource name of the Study to create the Trial + in. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + trial (google.cloud.aiplatform_v1.types.Trial): + Required. The Trial to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + trial = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Trial, + ) + + +class GetTrialRequest(proto.Message): + r"""Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. + + Attributes: + name (str): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrialsRequest(proto.Message): + r"""Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + + Attributes: + parent (str): + Required. The resource name of the Study to list the Trial + from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + page_token (str): + Optional. A page token to request the next + page of results. If unspecified, there are no + subsequent pages. + page_size (int): + Optional. The number of Trials to retrieve + per "page" of results. If unspecified, the + service will pick an appropriate default. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + + +class ListTrialsResponse(proto.Message): + r"""Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. + + Attributes: + trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): + The Trials associated with the Study. + next_page_token (str): + Pass this token as the ``page_token`` field of the request + for a subsequent call. If this field is omitted, there are + no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class AddTrialMeasurementRequest(proto.Message): + r"""Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. + + Attributes: + trial_name (str): + Required. The name of the trial to add measurement. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + measurement (google.cloud.aiplatform_v1.types.Measurement): + Required. The measurement to be added to a + Trial. + """ + + trial_name = proto.Field( + proto.STRING, + number=1, + ) + measurement = proto.Field( + proto.MESSAGE, + number=3, + message=gca_study.Measurement, + ) + + +class CompleteTrialRequest(proto.Message): + r"""Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + final_measurement (google.cloud.aiplatform_v1.types.Measurement): + Optional. If provided, it will be used as the completed + Trial's final_measurement; Otherwise, the service will + auto-select a previously reported measurement as the + final-measurement + trial_infeasible (bool): + Optional. True if the Trial cannot be run with the given + Parameter, and final_measurement will be ignored. + infeasible_reason (str): + Optional. A human readable reason why the trial was + infeasible. This should only be provided if + ``trial_infeasible`` is true. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + final_measurement = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Measurement, + ) + trial_infeasible = proto.Field( + proto.BOOL, + number=3, + ) + infeasible_reason = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteTrialRequest(proto.Message): + r"""Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CheckTrialEarlyStoppingStateRequest(proto.Message): + r"""Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + + Attributes: + trial_name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + trial_name = proto.Field( + proto.STRING, + number=1, + ) + + +class CheckTrialEarlyStoppingStateResponse(proto.Message): + r"""Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. + + Attributes: + should_stop (bool): + True if the Trial should stop. + """ + + should_stop = proto.Field( + proto.BOOL, + number=1, + ) + + +class CheckTrialEarlyStoppingStateMetatdata(proto.Message): + r"""This message will be placed in the metadata field of a + google.longrunning.Operation associated with a + CheckTrialEarlyStoppingState request. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for suggesting Trials. + study (str): + The name of the Study that the Trial belongs + to. + trial (str): + The Trial name. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + study = proto.Field( + proto.STRING, + number=2, + ) + trial = proto.Field( + proto.STRING, + number=3, + ) + + +class StopTrialRequest(proto.Message): + r"""Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListOptimalTrialsRequest(proto.Message): + r"""Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + + Attributes: + parent (str): + Required. The name of the Study that the + optimal Trial belongs to. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + + +class ListOptimalTrialsResponse(proto.Message): + r"""Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. + + Attributes: + optimal_trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): + The pareto-optimal Trials for multiple objective Study or + the optimal trial for single objective Study. The definition + of pareto-optimal can be checked in wiki page. 
+ https://en.wikipedia.org/wiki/Pareto_efficiency + """ + + optimal_trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 0000000000..b99357239d --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+ALL_PYTHON = [
+    "3.6",
+    "3.7",
+    "3.8",
+    "3.9",
+    "3.10",
+]
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+BLACK_VERSION = "black==19.10b0"
+BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"]
+DEFAULT_PYTHON_VERSION = "3.9"
+
+nox.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+    "blacken",
+    "lint",
+    "lint_setup_py",
+]
+
+@nox.session(python=ALL_PYTHON)
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/',
+        '--cov=tests/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+    """Run the final coverage report.
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==4.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. 
+ """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_async.py new file mode 100644 index 0000000000..34d2c2cb05 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_v1_generated_DatasetService_CreateDataset_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_create_dataset():
+    # Create a client
+    client = aiplatform_v1.DatasetServiceAsyncClient()
+
+    # Initialize request argument(s)
+    dataset = aiplatform_v1.Dataset()
+    dataset.display_name = "display_name_value"
+    dataset.metadata_schema_uri = "metadata_schema_uri_value"
+    dataset.metadata.null_value = "NULL_VALUE"
+
+    request = aiplatform_v1.CreateDatasetRequest(
+        parent="parent_value",
+        dataset=dataset,
+    )
+
+    # Make the request
+    operation = client.create_dataset(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = (await operation).result()
+
+    # Handle the response
+    print(response)
+
+# [END aiplatform_v1_generated_DatasetService_CreateDataset_async]
diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_sync.py
new file mode 100644
index 0000000000..d8756ab0e7
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_create_dataset_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_CreateDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_CreateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_async.py new file mode 100644 index 0000000000..8a9f058d17 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteDataset
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_v1_generated_DatasetService_DeleteDataset_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_delete_dataset():
+    # Create a client
+    client = aiplatform_v1.DatasetServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = aiplatform_v1.DeleteDatasetRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    operation = client.delete_dataset(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = (await operation).result()
+
+    # Handle the response
+    print(response)
+
+# [END aiplatform_v1_generated_DatasetService_DeleteDataset_async]
diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_sync.py
new file mode 100644
index 0000000000..b8d91fac1f
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_delete_dataset_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_DeleteDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_DeleteDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_async.py new file mode 100644 index 0000000000..c3f18a5e52 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ExportData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_v1_generated_DatasetService_ExportData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_export_data():
+    # Create a client
+    client = aiplatform_v1.DatasetServiceAsyncClient()
+
+    # Initialize request argument(s)
+    export_config = aiplatform_v1.ExportDataConfig()
+    export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+
+    request = aiplatform_v1.ExportDataRequest(
+        name="name_value",
+        export_config=export_config,
+    )
+
+    # Make the request
+    operation = client.export_data(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = (await operation).result()
+
+    # Handle the response
+    print(response)
+
+# [END aiplatform_v1_generated_DatasetService_ExportData_async]
diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_sync.py
new file mode 100644
index 0000000000..bf501e1f3f
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+#
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ExportData_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_data(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + export_config = aiplatform_v1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_ExportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py new file mode 100644 index 0000000000..1e56b2e25b --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_GetAnnotationSpec_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_GetAnnotationSpec_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py new file mode 100644 index 0000000000..74ace5d895 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_GetAnnotationSpec_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_GetAnnotationSpec_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_async.py new file mode 100644 index 0000000000..3f8f590106 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_GetDataset_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_GetDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_sync.py new file mode 100644 index 0000000000..21ded79723 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_get_dataset_sync.py @@ -0,0 
+1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_GetDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_GetDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_async.py new file mode 100644 index 0000000000..7ecfbdb59d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ImportData_async] +from google.cloud import aiplatform_v1 + + +async def sample_import_data(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_ImportData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_sync.py new file mode 100644 index 0000000000..2e0cced2ad --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_import_data_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ImportData_sync] +from google.cloud import aiplatform_v1 + + +def sample_import_data(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_ImportData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_async.py new file mode 100644 index 0000000000..3d0ee2fc8d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListAnnotations_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_annotations(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListAnnotations_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_sync.py new file mode 100644 index 0000000000..e476b1c5f5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_annotations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListAnnotations_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_annotations(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListAnnotations_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_async.py new file mode 100644 index 0000000000..2de31c8715 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListDataItems_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_data_items(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListDataItems_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_sync.py new file mode 100644 index 0000000000..2cb7f59d01 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_data_items_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListDataItems_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_data_items(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListDataItems_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_async.py new file mode 100644 index 0000000000..7a634e82b2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListDatasets_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_datasets(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListDatasets_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_sync.py new file mode 100644 index 0000000000..5934bde144 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_list_datasets_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_ListDatasets_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_datasets(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DatasetService_ListDatasets_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_async.py new file mode 100644 index 0000000000..1fea8b45c3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_UpdateDataset_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_UpdateDataset_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_sync.py new file mode 100644 index 0000000000..170b3939d8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_dataset_service_update_dataset_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DatasetService_UpdateDataset_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_dataset(): + # Create a client + client = aiplatform_v1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DatasetService_UpdateDataset_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_async.py new file mode 100644 index 0000000000..bc238c831a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_CreateEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_CreateEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py new file mode 100644 index 0000000000..81a24e5df0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_CreateEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_CreateEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py new file mode 100644 index 0000000000..da1f35430b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_DeleteEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_DeleteEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py new file mode 100644 index 0000000000..7042baedf8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_async.py new file mode 100644 index 0000000000..9610eae986 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_DeployModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_deploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_DeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_sync.py new file mode 100644 index 0000000000..448585048b --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_deploy_model_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_DeployModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_deploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_DeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_async.py new file mode 100644 index 0000000000..741a75dbdd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_GetEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_GetEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py new file mode 100644 index 0000000000..7a39004427 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_GetEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_GetEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_async.py new file mode 100644 index 0000000000..297abfff5a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_ListEndpoints_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_endpoints(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_EndpointService_ListEndpoints_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py new file mode 100644 index 0000000000..5c4faf7931 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_ListEndpoints_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_endpoints(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_EndpointService_ListEndpoints_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_async.py new file mode 100644 index 0000000000..88258ff0b7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_UndeployModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_undeploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_UndeployModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py new file mode 100644 index 0000000000..2178e3340f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_UndeployModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_undeploy_model(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_UndeployModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_async.py new file mode 100644 index 0000000000..c1039cda24 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_UpdateEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_UpdateEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py new file mode 100644 index 0000000000..8b717bf344 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_EndpointService_UpdateEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_endpoint(): + # Create a client + client = aiplatform_v1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = client.update_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_EndpointService_UpdateEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py new file mode 100644 index 0000000000..7de0f610c9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = await client.read_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py new file mode 100644 index 0000000000..9693bb1a66 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py new file mode 100644 index 0000000000..91e74ea89c --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = await client.streaming_read_feature_values(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END 
aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py new file mode 100644 index 0000000000..927a65a41b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_async.py new file mode 100644 index 0000000000..446f602cb6 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_create_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py new file mode 100644 index 0000000000..96f958a186 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_create_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py new file mode 100644 index 0000000000..6f471a7d5e --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + 
destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py new file mode 100644 index 0000000000..4926d5a69f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_async.py new file mode 100644 index 0000000000..3015072317 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateEntityType_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py new file mode 100644 index 0000000000..29189412d3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateEntityType_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_async.py new file mode 100644 index 0000000000..671177a883 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_async.py @@ -0,0 +1,54 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateFeature_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_sync.py new file mode 100644 index 
0000000000..cdf4b5e9ad --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_feature_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateFeature_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_async.py new file mode 100644 index 0000000000..fd5fdcda77 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py new file mode 100644 index 0000000000..7457a55007 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py new file mode 100644 index 0000000000..09672d6404 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py new file mode 100644 index 0000000000..0a4bbe5610 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_async.py new file mode 100644 index 0000000000..53e60c17f5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteFeature_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_sync.py new file mode 100644 index 0000000000..f03ea625be --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_feature_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteFeature_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py new file mode 100644 index 0000000000..52de9bc84c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py new file mode 100644 index 0000000000..8556329614 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_async.py new file mode 100644 index 0000000000..f60187c790 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_export_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + 
print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py new file mode 100644 index 0000000000..916830e11e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_async.py new file mode 100644 index 0000000000..795c12d50b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetEntityType_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py new file mode 100644 index 0000000000..322f1a8c66 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = client.get_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetEntityType_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_async.py new file mode 100644 index 0000000000..c3274b019b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetFeature_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_sync.py new file mode 100644 index 0000000000..ea59add0ad --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_feature_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = client.get_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetFeature_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_async.py new file mode 100644 index 0000000000..16a332c6fa --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py new file mode 100644 index 0000000000..1de46d7414 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_featurestore(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_async.py new file mode 100644 index 0000000000..2c6c0dfd7d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_async] +from google.cloud import aiplatform_v1 + + +async def sample_import_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py 
new file mode 100644 index 0000000000..1bb06697f3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_sync] +from google.cloud import aiplatform_v1 + + +def sample_import_feature_values(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_async.py new file mode 100644 index 0000000000..36fa20d54c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_entity_types(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py new file mode 100644 index 0000000000..a7a97fa236 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_entity_types(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_async.py new file mode 100644 index 0000000000..493cf3b272 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListFeatures_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListFeatures_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_sync.py new file mode 100644 index 0000000000..67e53820f2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListFeatures_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListFeatures_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_async.py new file mode 100644 index 0000000000..a6e7fdda8f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_featurestores(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py new file mode 100644 index 0000000000..21bafb70b3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_featurestores(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_async.py new file mode 100644 index 0000000000..bf9ec39b73 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_SearchFeatures_async] +from google.cloud import aiplatform_v1 + + +async def sample_search_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_SearchFeatures_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_sync.py new file mode 100644 index 0000000000..41892e64b4 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_search_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_SearchFeatures_sync] +from google.cloud import aiplatform_v1 + + +def sample_search_features(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_SearchFeatures_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_async.py new file mode 100644 index 0000000000..97e48bcbbf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateEntityTypeRequest( + ) + + # Make the request + response = await client.update_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py new file mode 100644 index 0000000000..431c93fa91 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_entity_type(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateEntityTypeRequest( + ) + + # Make the request + response = client.update_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_async.py new file mode 100644 index 0000000000..609b6e9e61 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateFeature_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = await client.update_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateFeature_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_sync.py new file mode 100644 index 0000000000..d86da09498 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_feature_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateFeature_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_feature(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = client.update_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateFeature_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_async.py new file mode 100644 index 0000000000..83501ffe96 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py new file mode 100644 index 0000000000..64c6fd147a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_featurestore(): + # Create a client + client = aiplatform_v1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py new file mode 100644 index 0000000000..0b77503e53 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py new file mode 100644 index 
0000000000..bfee5ecfce --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py new file mode 100644 index 0000000000..0bec262cd3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py new file mode 100644 index 0000000000..885aa0c4d2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py new file mode 100644 index 0000000000..45b23cf1bd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_DeployIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_deploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_DeployIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py new file mode 100644 index 0000000000..0c9402d026 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py @@ -0,0 +1,54 
@@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_deploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py new file 
mode 100644 index 0000000000..db0f322dce --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py new file mode 100644 index 0000000000..5935ec4363 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py new file mode 100644 index 0000000000..61510c311e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py new file mode 100644 index 0000000000..05c733d458 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py new file mode 100644 index 0000000000..032e0807a0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for MutateDeployedIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py new file mode 100644 index 0000000000..033699b684 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MutateDeployedIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py new file mode 100644 index 0000000000..725285529b --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_undeploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py new file mode 100644 index 0000000000..efd09cfeda --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_undeploy_index(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py new file mode 100644 index 0000000000..c670d1cb32 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py new file mode 100644 index 0000000000..98669d086c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_async.py new file mode 100644 index 0000000000..f4b9614d41 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_CreateIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_CreateIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_sync.py new file mode 100644 index 0000000000..60dcc20299 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_create_index_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_CreateIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_CreateIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_async.py new file mode 100644 index 0000000000..6f8ab5eae7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_DeleteIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_DeleteIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_sync.py new file mode 100644 index 0000000000..68d981526c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_delete_index_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_DeleteIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_DeleteIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_async.py new file mode 100644 index 0000000000..7d01bf3a29 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_GetIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_GetIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_sync.py new file mode 100644 index 0000000000..662e8c9bb0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_get_index_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_GetIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_GetIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_async.py new file mode 100644 index 0000000000..df114f1694 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_ListIndexes_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_indexes(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_IndexService_ListIndexes_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_sync.py new file mode 100644 index 0000000000..9048e9b3cd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_list_indexes_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_ListIndexes_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_indexes(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_IndexService_ListIndexes_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_async.py new file mode 100644 index 0000000000..a7845d43a7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_UpdateIndex_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_index(): + # Create a client + client = aiplatform_v1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_UpdateIndex_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_sync.py new file mode 100644 index 0000000000..315e5aface --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_index_service_update_index_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_IndexService_UpdateIndex_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_index(): + # Create a client + client = aiplatform_v1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_IndexService_UpdateIndex_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py new file mode 100644 index 0000000000..99f2d28b9b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py new file mode 100644 index 0000000000..b22147b454 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_async.py new file mode 100644 index 0000000000..aedd92111b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_custom_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelCustomJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_sync.py new file mode 100644 index 0000000000..4f30c4a526 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_custom_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_custom_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelCustomJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py new file mode 100644 index 0000000000..bc23ab2ebb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelDataLabelingJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py new file mode 100644 index 0000000000..0e474f9c72 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..48deaf9ec9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..d0928e186f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py new file mode 100644 index 0000000000..aa968a2d42 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py new file mode 100644 index 0000000000..b178256c10 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_async.py new file mode 100644 index 0000000000..0964e4a2ea --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateCustomJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_sync.py new file mode 100644 index 0000000000..577f365f97 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_custom_job_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateCustomJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_async.py new file mode 100644 index 0000000000..810ee2a58a --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # 
Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateDataLabelingJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py new file mode 100644 index 0000000000..841de30638 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..7e86e06a07 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = 
await client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..e18b9f5944 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..ce1e65948c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py @@ 
-0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = await client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..c31da8eb9b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py new file mode 100644 index 0000000000..67f4b93ab9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py new file mode 100644 index 0000000000..ddf9ce0a02 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_async.py new file mode 100644 index 0000000000..c9b9eb72b2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteCustomJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_sync.py new file mode 100644 index 0000000000..e4b929844c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_custom_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteCustomJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py new file mode 100644 index 0000000000..32bd9e7ebf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py new file mode 100644 index 0000000000..3bab4949b3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..2eb5320f44 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..17ba4828fa --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..4522605729 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..6a78a8973f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py new file mode 100644 index 0000000000..4efd8ca1b8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetBatchPredictionJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetBatchPredictionJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py new file mode 100644 index 0000000000..5a6ecb7dbb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_async.py new file mode 100644 index 0000000000..0d7abd11c8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetCustomJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetCustomJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_sync.py new file mode 100644 index 0000000000..9dc3ba468b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_custom_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetCustomJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_custom_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetCustomJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_async.py new file mode 100644 index 0000000000..2050601b23 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetDataLabelingJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetDataLabelingJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py new file mode 100644 index 0000000000..89b82894bf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetDataLabelingJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetDataLabelingJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..6a409e7d31 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..0931ff5419 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..16f2317bf1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..66888d2bff --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py new file mode 100644 index 0000000000..710c18549d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py new file mode 100644 index 0000000000..58afa26785 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_async.py new file mode 100644 index 0000000000..4abc6f745f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListCustomJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListCustomJobs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_sync.py new file mode 100644 index 0000000000..28fc039f6b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_custom_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListCustomJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListCustomJobs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py new file mode 100644 index 0000000000..f1551703b9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListDataLabelingJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListDataLabelingJobs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py new file mode 100644 index 0000000000..147d71dfe8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py new file mode 100644 index 0000000000..362fcb40a8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py new file mode 100644 index 0000000000..98bf35c6a3 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py new file mode 100644 index 0000000000..c1526e51d5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 
2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py new file mode 100644 index 0000000000..7bfa4bc111 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..b16169b7af --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..622d2b48f0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..c354e8081d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..01f7513482 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py new file mode 100644 index 0000000000..c5eb2af9a0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] +from google.cloud import aiplatform_v1 + + +async def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py new file mode 100644 index 0000000000..cc7f4cc271 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] +from google.cloud import aiplatform_v1 + + +def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..ee8df228db --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..5f5e82da6d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py new file mode 100644 index 0000000000..80c217b969 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py new file mode 100644 index 0000000000..74bcf8ea59 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_async.py new file mode 100644 index 0000000000..acab4726a8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddContextChildren_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_context_children(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddContextChildren_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_sync.py new file mode 100644 index 0000000000..6398876b76 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_context_children_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddContextChildren_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_context_children(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddContextChildren_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_async.py new file mode 100644 index 0000000000..89c4593000 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddExecutionEvents_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_execution_events(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.add_execution_events(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddExecutionEvents_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_sync.py new file mode 100644 index 0000000000..08ea408a6e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_add_execution_events_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_execution_events(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_async.py new file mode 100644 index 0000000000..ff3af7a59e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateArtifact_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_sync.py new file mode 100644 index 0000000000..a6d7bc372d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_artifact_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateArtifact_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_async.py new file mode 100644 index 0000000000..ca9e9cc4da --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateContext_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py new file mode 100644 index 0000000000..66d45fed18 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_context_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateContext_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_async.py new file mode 100644 index 0000000000..4d27a1999f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateExecution_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_sync.py new file mode 100644 index 0000000000..8551cc9a07 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_execution_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateExecution_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py new file mode 100644 index 0000000000..435b3f49b0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = await client.create_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py new file mode 100644 index 0000000000..8d5049d57a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_async.py new file mode 100644 index 0000000000..43b162c7cc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateMetadataStore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py new file mode 100644 index 0000000000..6ed4d538b7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 
-*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_async.py new file mode 100644 index 0000000000..0782e6c6e9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_async.py @@ 
-0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteArtifact_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_sync.py new file mode 100644 index 0000000000..e319867bd3 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_artifact_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteArtifact_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_async.py new file mode 100644 index 0000000000..ca834d7e55 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteContext_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_sync.py new file mode 100644 index 0000000000..88d5592809 --- /dev/null 
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_context_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteContext_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_async.py new file mode 100644 index 0000000000..4292c4d342 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteExecution_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_sync.py new file mode 100644 index 
0000000000..9f053cef44 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_execution_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteExecution_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py new file mode 
100644 index 0000000000..1d099a727f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py new file mode 100644 index 0000000000..88a154c397 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_async.py new file mode 100644 index 0000000000..5e7ebf1c1f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetArtifact_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_sync.py new file mode 100644 index 0000000000..000f7bf853 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_artifact_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetArtifact_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_async.py new file mode 100644 index 0000000000..76aff63c76 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetContext_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_sync.py new file mode 100644 index 0000000000..5d3c36ef03 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_context_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetContext_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_async.py new file mode 100644 index 0000000000..0b3d2edd97 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetExecution_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_sync.py new file mode 100644 index 0000000000..f7e6e8ac6c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_execution_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetExecution_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py new file mode 100644 index 0000000000..fb93d9e1e5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetMetadataSchema_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetMetadataSchema_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py new file mode 100644 index 0000000000..84a7704069 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_async.py new file mode 100644 index 0000000000..5a34b49bfc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetMetadataStore_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetMetadataStore_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py new file mode 100644 index 0000000000..d0b94f904a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_GetMetadataStore_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_GetMetadataStore_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_async.py new file mode 100644 index 0000000000..7c706a7a07 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListArtifacts_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListArtifacts_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_sync.py new file mode 100644 index 0000000000..a79537af49 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_artifacts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListArtifacts_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListArtifacts_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_async.py new file mode 100644 index 0000000000..63fcc14ef0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListContexts_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListContexts_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_sync.py new file mode 100644 index 0000000000..1b4413600f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_contexts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListContexts_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListContexts_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_async.py new file mode 100644 index 0000000000..0c51f41884 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListExecutions_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_sync.py new file mode 100644 index 0000000000..ba0f85ed82 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_executions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListExecutions_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py new file mode 100644 index 0000000000..2868b533b6 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py new file mode 100644 index 0000000000..b69851488e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py new file mode 100644 index 0000000000..bc16d158ef --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListMetadataStores_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListMetadataStores_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py new file mode 100644 index 0000000000..2997676bcf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_ListMetadataStores_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MetadataService_ListMetadataStores_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_async.py new file mode 100644 index 0000000000..d07abcb29e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeArtifacts_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeArtifacts_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py new file mode 100644 index 0000000000..798eeed330 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_async.py new file mode 100644 index 0000000000..27cd785e74 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeContexts_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeContexts_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_sync.py new file mode 100644 index 0000000000..22c2790395 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_contexts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeContexts_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_contexts(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeContexts_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_async.py new file mode 100644 index 0000000000..389555139d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeExecutions_async] +from google.cloud import aiplatform_v1 + + +async def sample_purge_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeExecutions_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_sync.py new file mode 100644 index 0000000000..65faab708e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_purge_executions_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_PurgeExecutions_sync] +from google.cloud import aiplatform_v1 + + +def sample_purge_executions(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_PurgeExecutions_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py new file mode 100644 index 0000000000..f3899cedb7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py @@ -0,0 +1,45 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = await client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py new file mode 100644 index 0000000000..8cc1e0b3cc --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py new file mode 
100644 index 0000000000..3f7957e959 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py new file mode 100644 index 0000000000..ea51b51346 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py new file mode 100644 index 0000000000..1674db77fd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async] +from google.cloud import aiplatform_v1 + + +async def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py new file mode 100644 index 0000000000..25b5bee868 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync] +from google.cloud import aiplatform_v1 + + +def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_async.py new file mode 100644 index 0000000000..9449ab0d2d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateArtifact_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateArtifact_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_sync.py new file mode 100644 index 0000000000..2dabbbb514 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_artifact_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateArtifact_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_artifact(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateArtifact_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_async.py new file mode 100644 index 0000000000..a3937b1ed7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateContext_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_context(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateContext_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_sync.py new file mode 100644 index 0000000000..c7b1efb0db --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_context_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateContext_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_context(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateContext_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_async.py new file mode 100644 index 0000000000..1caf21f5e8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateExecution_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateExecution_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_sync.py new file mode 100644 index 0000000000..c44ff97fd0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_metadata_service_update_execution_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MetadataService_UpdateExecution_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_execution(): + # Create a client + client = aiplatform_v1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MetadataService_UpdateExecution_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py new file mode 100644 index 0000000000..296fc1c8d6 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MigrationService_BatchMigrateResources_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MigrationService_BatchMigrateResources_async] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py new file mode 100644 index 0000000000..a8d6670023 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_async.py new file mode 100644 index 0000000000..cc0ec495b7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MigrationService_SearchMigratableResources_async] +from google.cloud import aiplatform_v1 + + +async def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MigrationService_SearchMigratableResources_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py new file mode 100644 index 0000000000..081e93fa14 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync] +from google.cloud import aiplatform_v1 + + +def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1.MigrationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_async.py new file mode 100644 index 0000000000..cefd282eea --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_DeleteModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_DeleteModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_sync.py new file mode 100644 index 0000000000..88b4f48cc8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_DeleteModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_DeleteModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_async.py new file mode 100644 index 0000000000..cb27c3d9d8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_DeleteModelVersion_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_model_version(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_DeleteModelVersion_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_sync.py new file mode 100644 index 0000000000..070da48995 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_delete_model_version_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_DeleteModelVersion_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_model_version(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_DeleteModelVersion_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_async.py new file mode 100644 index 0000000000..6aefde7547 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ExportModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_export_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ExportModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_sync.py new file mode 100644 index 0000000000..ef99724551 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_export_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ExportModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ExportModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_async.py new file mode 100644 index 0000000000..f40e0f6b33 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_async.py new file mode 100644 index 0000000000..3aeb38f93b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModelEvaluation_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModelEvaluation_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py new file mode 100644 index 0000000000..f9513d9962 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py new file mode 100644 index 0000000000..722f24ae6f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_sync.py new file mode 100644 index 0000000000..1c0ade5d12 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModelEvaluation_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModelEvaluation_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_sync.py new file mode 100644 index 0000000000..ef2733a3ba --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_get_model_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_GetModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_GetModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py new file mode 100644 index 0000000000..d705d6505f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ImportModelEvaluation_async] +from google.cloud import aiplatform_v1 + + +async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ImportModelEvaluation_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py new file mode 100644 index 0000000000..f2910ac56e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync] +from google.cloud import aiplatform_v1 + + +def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py new file mode 100644 index 0000000000..eb053302bd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py new file mode 100644 index 0000000000..6b1537fb90 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_async.py new file mode 100644 index 0000000000..855b1efb42 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelEvaluations_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_sync.py new file mode 100644 index 0000000000..49d2f98139 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_evaluations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelEvaluations_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_async.py new file mode 100644 index 0000000000..f930d0c3a2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelVersions_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_model_versions(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelVersions_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_sync.py new file mode 100644 index 0000000000..3403c2de8e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_model_versions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModelVersions_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_model_versions(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModelVersions_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_async.py new file mode 100644 index 0000000000..17cd1ed4d2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModels_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_models(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModels_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_sync.py new file mode 100644 index 0000000000..9a36fe9e08 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_list_models_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ListModels_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_models(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_ModelService_ListModels_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_async.py new file mode 100644 index 0000000000..f48cf4b425 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_MergeVersionAliases_async] +from google.cloud import aiplatform_v1 + + +async def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = await client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_MergeVersionAliases_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_sync.py new file mode 100644 index 0000000000..b7ac00822d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_merge_version_aliases_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_MergeVersionAliases_sync] +from google.cloud import aiplatform_v1 + + +def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_MergeVersionAliases_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_async.py new file mode 100644 index 0000000000..a9e19dedaf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_UpdateModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_UpdateModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_sync.py new file mode 100644 index 0000000000..c2721d40c8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_update_model_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_UpdateModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_UpdateModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_async.py new file mode 100644 index 0000000000..66dcd0d0dd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_UploadModel_async] +from google.cloud import aiplatform_v1 + + +async def sample_upload_model(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_UploadModel_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_sync.py new file mode 100644 index 0000000000..47abf4e6aa --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_model_service_upload_model_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_UploadModel_sync] +from google.cloud import aiplatform_v1 + + +def sample_upload_model(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_UploadModel_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py new file mode 100644 index 0000000000..4c7f478f37 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google 
LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CancelPipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_pipeline_job(request=request) + + +# [END aiplatform_v1_generated_PipelineService_CancelPipelineJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py new file mode 100644 index 0000000000..a65be7ac8e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CancelPipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_pipeline_job(request=request) + + +# [END aiplatform_v1_generated_PipelineService_CancelPipelineJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py new file mode 100644 index 0000000000..69622f77c2 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + await client.cancel_training_pipeline(request=request) + + +# [END aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py new file mode 100644 index 0000000000..51f261d32a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + client.cancel_training_pipeline(request=request) + + +# [END aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py new file mode 100644 index 0000000000..64a5345922 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CreatePipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_CreatePipelineJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py new file mode 100644 index 0000000000..5ade700a43 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CreatePipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_CreatePipelineJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py new file mode 100644 index 0000000000..20d89bed38 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py new file mode 100644 index 0000000000..33bfc0e76a --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_sync] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py new file mode 100644 index 0000000000..194f78a217 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_DeletePipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_DeletePipelineJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py new file mode 100644 index 0000000000..9f0dec1566 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_DeletePipelineJob_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_DeletePipelineJob_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py new file mode 100644 index 0000000000..ffe3d6f1f1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py new file mode 100644 index 0000000000..790fb6a3cd --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py new file mode 100644 index 0000000000..452220e044 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PipelineService_GetPipelineJob_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PipelineService_GetPipelineJob_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py new file mode 100644 index 0000000000..a03dcab198 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# [START aiplatform_v1_generated_PipelineService_GetPipelineJob_sync]
from google.cloud import aiplatform_v1


def sample_get_pipeline_job():
    """Fetch a single PipelineJob by resource name (synchronous variant).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PipelineService client.
    client = aiplatform_v1.PipelineServiceClient()

    # "name_value" is a placeholder for the full PipelineJob resource name.
    request = aiplatform_v1.GetPipelineJobRequest(name="name_value")

    # Issue the RPC and print the PipelineJob it returns.
    print(client.get_pipeline_job(request=request))

# [END aiplatform_v1_generated_PipelineService_GetPipelineJob_sync]
# [START aiplatform_v1_generated_PipelineService_GetTrainingPipeline_async]
from google.cloud import aiplatform_v1


async def sample_get_training_pipeline():
    """Fetch a single TrainingPipeline by resource name (asynchronous variant).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Async client: RPC methods are coroutines and must be awaited.
    client = aiplatform_v1.PipelineServiceAsyncClient()

    # "name_value" is a placeholder for the full TrainingPipeline resource name.
    request = aiplatform_v1.GetTrainingPipelineRequest(name="name_value")

    # Await the RPC and print the TrainingPipeline it returns.
    print(await client.get_training_pipeline(request=request))

# [END aiplatform_v1_generated_PipelineService_GetTrainingPipeline_async]
# [START aiplatform_v1_generated_PipelineService_GetTrainingPipeline_sync]
from google.cloud import aiplatform_v1


def sample_get_training_pipeline():
    """Fetch a single TrainingPipeline by resource name (synchronous variant).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PipelineService client.
    client = aiplatform_v1.PipelineServiceClient()

    # "name_value" is a placeholder for the full TrainingPipeline resource name.
    request = aiplatform_v1.GetTrainingPipelineRequest(name="name_value")

    # Issue the RPC and print the TrainingPipeline it returns.
    print(client.get_training_pipeline(request=request))

# [END aiplatform_v1_generated_PipelineService_GetTrainingPipeline_sync]
# [START aiplatform_v1_generated_PipelineService_ListPipelineJobs_async]
from google.cloud import aiplatform_v1


async def sample_list_pipeline_jobs():
    """List all PipelineJobs under a location, following pagination (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Create an async client; its RPC methods are coroutines.
    client = aiplatform_v1.PipelineServiceAsyncClient()

    # Initialize request argument(s).  "parent_value" is a placeholder for
    # "projects/{project}/locations/{location}".
    request = aiplatform_v1.ListPipelineJobsRequest(
        parent="parent_value",
    )

    # Make the request.  The coroutine must be awaited to obtain the
    # AsyncPager — the original sample omitted this await, leaving a bare
    # coroutine that the `async for` below cannot iterate.
    page_result = await client.list_pipeline_jobs(request=request)

    # Handle the response; the pager fetches further pages transparently.
    async for response in page_result:
        print(response)

# [END aiplatform_v1_generated_PipelineService_ListPipelineJobs_async]
# [START aiplatform_v1_generated_PipelineService_ListPipelineJobs_sync]
from google.cloud import aiplatform_v1


def sample_list_pipeline_jobs():
    """List all PipelineJobs under a location, following pagination (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PipelineService client.
    client = aiplatform_v1.PipelineServiceClient()

    # "parent_value" is a placeholder for
    # "projects/{project}/locations/{location}".
    request = aiplatform_v1.ListPipelineJobsRequest(parent="parent_value")

    # The returned pager lazily fetches additional pages while iterating.
    for job in client.list_pipeline_jobs(request=request):
        print(job)

# [END aiplatform_v1_generated_PipelineService_ListPipelineJobs_sync]
# [START aiplatform_v1_generated_PipelineService_ListTrainingPipelines_async]
from google.cloud import aiplatform_v1


async def sample_list_training_pipelines():
    """List all TrainingPipelines under a location, following pagination (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Create an async client; its RPC methods are coroutines.
    client = aiplatform_v1.PipelineServiceAsyncClient()

    # Initialize request argument(s).  "parent_value" is a placeholder for
    # "projects/{project}/locations/{location}".
    request = aiplatform_v1.ListTrainingPipelinesRequest(
        parent="parent_value",
    )

    # Make the request.  The coroutine must be awaited to obtain the
    # AsyncPager — the original sample omitted this await, leaving a bare
    # coroutine that the `async for` below cannot iterate.
    page_result = await client.list_training_pipelines(request=request)

    # Handle the response; the pager fetches further pages transparently.
    async for response in page_result:
        print(response)

# [END aiplatform_v1_generated_PipelineService_ListTrainingPipelines_async]
# [START aiplatform_v1_generated_PipelineService_ListTrainingPipelines_sync]
from google.cloud import aiplatform_v1


def sample_list_training_pipelines():
    """List all TrainingPipelines under a location, following pagination (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PipelineService client.
    client = aiplatform_v1.PipelineServiceClient()

    # "parent_value" is a placeholder for
    # "projects/{project}/locations/{location}".
    request = aiplatform_v1.ListTrainingPipelinesRequest(parent="parent_value")

    # The returned pager lazily fetches additional pages while iterating.
    for pipeline in client.list_training_pipelines(request=request):
        print(pipeline)

# [END aiplatform_v1_generated_PipelineService_ListTrainingPipelines_sync]
# [START aiplatform_v1_generated_PredictionService_Explain_async]
from google.cloud import aiplatform_v1


async def sample_explain():
    """Request per-instance explanations from a deployed Endpoint (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Async client: RPC methods are coroutines and must be awaited.
    client = aiplatform_v1.PredictionServiceAsyncClient()

    # A single protobuf Value used as a minimal placeholder instance;
    # "NULL_VALUE" is the sole member of the NullValue enum.
    instance = aiplatform_v1.Value()
    instance.null_value = "NULL_VALUE"

    request = aiplatform_v1.ExplainRequest(
        endpoint="endpoint_value",
        instances=instance,
    )

    # Await the RPC and print the ExplainResponse.
    print(await client.explain(request=request))

# [END aiplatform_v1_generated_PredictionService_Explain_async]
# [START aiplatform_v1_generated_PredictionService_Explain_sync]
from google.cloud import aiplatform_v1


def sample_explain():
    """Request per-instance explanations from a deployed Endpoint (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PredictionService client.
    client = aiplatform_v1.PredictionServiceClient()

    # A single protobuf Value used as a minimal placeholder instance;
    # "NULL_VALUE" is the sole member of the NullValue enum.
    instance = aiplatform_v1.Value()
    instance.null_value = "NULL_VALUE"

    request = aiplatform_v1.ExplainRequest(
        endpoint="endpoint_value",
        instances=instance,
    )

    # Issue the RPC and print the ExplainResponse.
    print(client.explain(request=request))

# [END aiplatform_v1_generated_PredictionService_Explain_sync]
# [START aiplatform_v1_generated_PredictionService_Predict_async]
from google.cloud import aiplatform_v1


async def sample_predict():
    """Run online prediction against a deployed Endpoint (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Async client: RPC methods are coroutines and must be awaited.
    client = aiplatform_v1.PredictionServiceAsyncClient()

    # A single protobuf Value used as a minimal placeholder instance;
    # "NULL_VALUE" is the sole member of the NullValue enum.
    instance = aiplatform_v1.Value()
    instance.null_value = "NULL_VALUE"

    request = aiplatform_v1.PredictRequest(
        endpoint="endpoint_value",
        instances=instance,
    )

    # Await the RPC and print the PredictResponse.
    print(await client.predict(request=request))

# [END aiplatform_v1_generated_PredictionService_Predict_async]
# [START aiplatform_v1_generated_PredictionService_Predict_sync]
from google.cloud import aiplatform_v1


def sample_predict():
    """Run online prediction against a deployed Endpoint (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PredictionService client.
    client = aiplatform_v1.PredictionServiceClient()

    # A single protobuf Value used as a minimal placeholder instance;
    # "NULL_VALUE" is the sole member of the NullValue enum.
    instance = aiplatform_v1.Value()
    instance.null_value = "NULL_VALUE"

    request = aiplatform_v1.PredictRequest(
        endpoint="endpoint_value",
        instances=instance,
    )

    # Issue the RPC and print the PredictResponse.
    print(client.predict(request=request))

# [END aiplatform_v1_generated_PredictionService_Predict_sync]
# [START aiplatform_v1_generated_PredictionService_RawPredict_async]
from google.cloud import aiplatform_v1


async def sample_raw_predict():
    """Run a raw (pass-through) prediction against an Endpoint (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Async client: RPC methods are coroutines and must be awaited.
    client = aiplatform_v1.PredictionServiceAsyncClient()

    # "endpoint_value" is a placeholder for the full Endpoint resource name.
    request = aiplatform_v1.RawPredictRequest(endpoint="endpoint_value")

    # Await the RPC and print the raw HTTP body it returns.
    print(await client.raw_predict(request=request))

# [END aiplatform_v1_generated_PredictionService_RawPredict_async]
# [START aiplatform_v1_generated_PredictionService_RawPredict_sync]
from google.cloud import aiplatform_v1


def sample_raw_predict():
    """Run a raw (pass-through) prediction against an Endpoint (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous PredictionService client.
    client = aiplatform_v1.PredictionServiceClient()

    # "endpoint_value" is a placeholder for the full Endpoint resource name.
    request = aiplatform_v1.RawPredictRequest(endpoint="endpoint_value")

    # Issue the RPC and print the raw HTTP body it returns.
    print(client.raw_predict(request=request))

# [END aiplatform_v1_generated_PredictionService_RawPredict_sync]
# [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
from google.cloud import aiplatform_v1


async def sample_create_specialist_pool():
    """Create a SpecialistPool and wait for the long-running operation (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Create an async client; its RPC methods are coroutines.
    client = aiplatform_v1.SpecialistPoolServiceAsyncClient()

    # Initialize request argument(s): name and display_name are required.
    specialist_pool = aiplatform_v1.SpecialistPool()
    specialist_pool.name = "name_value"
    specialist_pool.display_name = "display_name_value"

    request = aiplatform_v1.CreateSpecialistPoolRequest(
        parent="parent_value",
        specialist_pool=specialist_pool,
    )

    # Make the request.  The coroutine must be awaited to obtain the
    # AsyncOperation — the original sample omitted this await, so the later
    # `operation.result()` was attempted on a bare coroutine.
    operation = await client.create_specialist_pool(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine and must be awaited.
    response = await operation.result()

    # Handle the response.
    print(response)

# [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
# [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_sync]
from google.cloud import aiplatform_v1


def sample_create_specialist_pool():
    """Create a SpecialistPool and wait for the long-running operation (sync).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Synchronous SpecialistPoolService client.
    client = aiplatform_v1.SpecialistPoolServiceClient()

    # Required fields on the pool: name and display_name.
    pool = aiplatform_v1.SpecialistPool()
    pool.name = "name_value"
    pool.display_name = "display_name_value"

    request = aiplatform_v1.CreateSpecialistPoolRequest(
        parent="parent_value",
        specialist_pool=pool,
    )

    # Creation is a long-running operation; block until it completes.
    lro = client.create_specialist_pool(request=request)

    print("Waiting for operation to complete...")

    outcome = lro.result()

    # Show the operation's final result.
    print(outcome)

# [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_sync]
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py new file mode 100644 index 0000000000..d3bcbc7eec --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
# [START aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_async]
from google.cloud import aiplatform_v1


async def sample_delete_specialist_pool():
    """Delete a SpecialistPool and wait for the long-running operation (async).

    Generated sample, illustrative only; placeholders must be replaced
    before running against a real project.
    """
    # Create an async client; its RPC methods are coroutines.
    client = aiplatform_v1.SpecialistPoolServiceAsyncClient()

    # Initialize request argument(s).  "name_value" is a placeholder for
    # the full SpecialistPool resource name.
    request = aiplatform_v1.DeleteSpecialistPoolRequest(
        name="name_value",
    )

    # Make the request.  The coroutine must be awaited to obtain the
    # AsyncOperation — the original sample omitted this await, so the later
    # `operation.result()` was attempted on a bare coroutine.
    operation = await client.delete_specialist_pool(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself a coroutine and must be awaited.
    response = await operation.result()

    # Handle the response.
    print(response)

# [END aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_async]
+# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py new file mode 100644 index 0000000000..d47c5c9d85 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py new file mode 100644 index 0000000000..a08a7b83bb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py new file mode 100644 index 0000000000..cbba73a939 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py new file mode 100644 index 0000000000..382fb61856 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSpecialistPools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py new file mode 100644 index 0000000000..e44a1e168e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py new file mode 100644 index 0000000000..d4fa9f0115 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_sync] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py new file mode 100644 index 0000000000..00eb92475a --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py new file mode 100644 index 0000000000..d74544e6f7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py new file mode 100644 index 0000000000..4406ccc535 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest() + requests.parent = "parent_value" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py new file mode 100644 index 
0000000000..c491c0e0ce --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest() + requests.parent = "parent_value" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..6e82e6e7bf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1 + + +async def sample_batch_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="tensorboard_value", + time_series=['time_series_value_1', 'time_series_value_2'], + ) + + # Make the request + response = await client.batch_read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..5bd3676ae4 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1 + + +def sample_batch_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="tensorboard_value", + time_series=['time_series_value_1', 'time_series_value_2'], + ) + + # Make the request + response = client.batch_read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py new file mode 100644 index 0000000000..021cd448f1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboard_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboard_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py new file mode 100644 index 0000000000..7d9591c078 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = await client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..063e851b6d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py new file mode 100644 index 0000000000..03b4d5b0af --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = await client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py new file mode 100644 index 0000000000..ecaf504337 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py new file mode 100644 index 0000000000..df66cc046e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboard_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboard_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py new file mode 100644 index 0000000000..aeb57161b4 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..ec5996e2bc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py new file mode 100644 index 0000000000..d534d34371 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboard_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = await client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboard_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py new file mode 100644 index 0000000000..fe3cb985f1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = await client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..f73f3cba4c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT!
+# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py new file mode 100644 index 0000000000..feb3a67c18 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = await client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py new file mode 100644 index 0000000000..5a4404e657 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py new file mode 100644 index 0000000000..b1690032ce --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboard_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboard_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py new file mode 100644 index 0000000000..720b9b7493 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = await client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..454690c315 --- /dev/null +++
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..f4b62c9eca --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1 + + +async def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = await client.export_tensorboard_time_series_data(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..dc56a42717 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT!
+# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1 + + +def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py new file mode 100644 index 0000000000..6cbb0d2833 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboard_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboard_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py new file mode 100644 index 0000000000..62b03f1654 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..422e366b4b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py new file mode 100644 index 0000000000..999deb6ce5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardRun_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardRun_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py new file mode 100644 index 0000000000..dc8cb3d326 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardRun_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardRun_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py new file mode 100644 index 0000000000..99d3cf05b5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboard_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboard_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py new file mode 100644 index 0000000000..0a59fa17ea --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..a933c092bc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py new file mode 100644 index 0000000000..44ffbf98b1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py new file mode 100644 index 0000000000..a826f4fe83 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py @@ -0,0 +1,46 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py new file mode 100644 index 0000000000..a656b95b1b --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py new file mode 100644 index 
0000000000..1389821241 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py new file mode 100644 index 0000000000..332cf9252b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..15b99118cf --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py new file mode 100644 index 0000000000..cf1586f9fe --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboards_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboards_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py new file mode 100644 index 0000000000..f625ede9f8 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ListTensorboards_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ListTensorboards_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py new file mode 100644 index 0000000000..61bbb8f6d9 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_async] +from google.cloud import aiplatform_v1 + + +async def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = await client.read_tensorboard_blob_data(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py new file mode 100644 index 0000000000..04db8236e7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync] +from google.cloud import aiplatform_v1 + + +def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = client.read_tensorboard_blob_data(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..270421cdad --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1 + + +async def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = await client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..723f625e8b --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1 + + +def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py new file mode 100644 index 0000000000..2efcdd6f09 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboard_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboard_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py new file mode 100644 index 0000000000..ef1906dfcc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for UpdateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = await client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..a7ce7ee31f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py new file mode 100644 index 0000000000..355c3542b7 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = await client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py new file mode 100644 index 0000000000..d5227007cc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py new file mode 100644 index 0000000000..d79a4dbad3 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboard_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboard_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py new file mode 100644 index 0000000000..80de4e9ef4 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1 + + +async def sample_update_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.update_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..5a666f95ed --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1 + + +def sample_update_tensorboard_time_series(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.update_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py new file mode 100644 index 0000000000..cf6790bc7e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardExperimentData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_async] +from google.cloud import aiplatform_v1 + + +async def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = await client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py new file mode 100644 index 0000000000..3e9153b5e5 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardExperimentData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_sync] +from google.cloud import aiplatform_v1 + + +def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py new file mode 100644 index 0000000000..9164bacb8e --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardRunData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_async] +from google.cloud import aiplatform_v1 + + +async def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = await client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py new file mode 100644 index 0000000000..a75f7b99fc --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for WriteTensorboardRunData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_sync] +from google.cloud import aiplatform_v1 + + +def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1.TensorboardServiceClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_sync] diff --git 
a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py new file mode 100644 index 0000000000..9a3f3c590c --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_AddTrialMeasurement_async] +from google.cloud import aiplatform_v1 + + +async def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = await client.add_trial_measurement(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_AddTrialMeasurement_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py new file mode 100644 index 0000000000..0cdb6f1d48 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddTrialMeasurement +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_AddTrialMeasurement_sync] +from google.cloud import aiplatform_v1 + + +def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_AddTrialMeasurement_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py new file mode 100644 index 0000000000..fd15098c55 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_async] +from google.cloud import aiplatform_v1 + + +async def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py new file mode 100644 index 0000000000..77f2459e5b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_sync] +from google.cloud import aiplatform_v1 + + +def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_async.py new file mode 100644 index 0000000000..cd58f93f6f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CompleteTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_complete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CompleteTrial_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_sync.py new file mode 100644 index 0000000000..71118fc50b --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_complete_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CompleteTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_complete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CompleteTrial_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_async.py new file mode 100644 index 0000000000..427fd4e819 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CreateStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CreateStudy_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_sync.py new file mode 100644 index 0000000000..4ff6c72222 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_study_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CreateStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CreateStudy_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_async.py new file mode 100644 index 0000000000..ce3b895d53 --- /dev/null +++ 
b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CreateTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_create_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CreateTrial_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_sync.py new file mode 100644 index 0000000000..5519fb4182 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_create_trial_sync.py 
@@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_CreateTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_create_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_CreateTrial_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_async.py new file mode 100644 index 0000000000..cc4fd917bb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_DeleteStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + await client.delete_study(request=request) + + +# [END aiplatform_v1_generated_VizierService_DeleteStudy_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_sync.py new file mode 100644 index 0000000000..0c4c0e96e1 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_study_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_DeleteStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + client.delete_study(request=request) + + +# [END aiplatform_v1_generated_VizierService_DeleteStudy_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_async.py new file mode 100644 index 0000000000..71004f7944 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_DeleteTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_delete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + await client.delete_trial(request=request) + + +# [END aiplatform_v1_generated_VizierService_DeleteTrial_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_sync.py new file mode 100644 index 0000000000..a9645fa768 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_DeleteTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_delete_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + client.delete_trial(request=request) + + +# [END aiplatform_v1_generated_VizierService_DeleteTrial_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_async.py new file mode 100644 index 0000000000..896be0929d --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_GetStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_GetStudy_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_sync.py new file mode 100644 index 0000000000..78403c40f4 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_study_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_GetStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_GetStudy_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_async.py new file mode 100644 index 0000000000..8d8f18a296 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_GetTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_get_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_GetTrial_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_sync.py new file mode 100644 index 0000000000..bbe4b42559 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_get_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_GetTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_get_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_GetTrial_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py new file mode 100644 index 0000000000..654800e7ba --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_ListOptimalTrials_async] +from google.cloud import aiplatform_v1 + + +async def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_ListOptimalTrials_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py new file mode 100644 index 0000000000..afd546b2dc --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_ListOptimalTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_ListOptimalTrials_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_async.py new file mode 100644 index 0000000000..b0601edaa6 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListStudies
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_v1_generated_VizierService_ListStudies_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_studies():
+    # Create a client
+    client = aiplatform_v1.VizierServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = aiplatform_v1.ListStudiesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_studies(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END aiplatform_v1_generated_VizierService_ListStudies_async]
diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_sync.py
new file mode 100644
index 0000000000..58d586d9f8
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_studies_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_ListStudies_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_studies(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_VizierService_ListStudies_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_async.py new file mode 100644 index 0000000000..04acd2d138 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTrials
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_v1_generated_VizierService_ListTrials_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_trials():
+    # Create a client
+    client = aiplatform_v1.VizierServiceAsyncClient()
+
+    # Initialize request argument(s)
+    request = aiplatform_v1.ListTrialsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_trials(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END aiplatform_v1_generated_VizierService_ListTrials_async]
diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_sync.py
new file mode 100644
index 0000000000..81b15f8c4a
--- /dev/null
+++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_list_trials_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_ListTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_list_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_VizierService_ListTrials_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_async.py new file mode 100644 index 0000000000..d02e6d3e9f --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_LookupStudy_async] +from google.cloud import aiplatform_v1 + + +async def sample_lookup_study(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_LookupStudy_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_sync.py new file mode 100644 index 0000000000..fc63574a92 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_lookup_study_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_LookupStudy_sync] +from google.cloud import aiplatform_v1 + + +def sample_lookup_study(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_LookupStudy_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_async.py new file mode 100644 index 0000000000..9565c230eb --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_StopTrial_async] +from google.cloud import aiplatform_v1 + + +async def sample_stop_trial(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_StopTrial_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_sync.py new file mode 100644 index 0000000000..9ee9f9cbf0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_stop_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_StopTrial_sync] +from google.cloud import aiplatform_v1 + + +def sample_stop_trial(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_StopTrial_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_async.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_async.py new file mode 100644 index 0000000000..576fcb4da0 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_SuggestTrials_async] +from google.cloud import aiplatform_v1 + + +async def sample_suggest_trials(): + # Create a client + client = aiplatform_v1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_SuggestTrials_async] diff --git a/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_sync.py b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_sync.py new file mode 100644 index 0000000000..27d99dc261 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/aiplatform_v1_generated_vizier_service_suggest_trials_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_VizierService_SuggestTrials_sync] +from google.cloud import aiplatform_v1 + + +def sample_suggest_trials(): + # Create a client + client = aiplatform_v1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_VizierService_SuggestTrials_sync] diff --git a/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_aiplatform_v1.json b/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_aiplatform_v1.json new file mode 100644 index 0000000000..20a7d2ed99 --- /dev/null +++ b/owl-bot-staging/v1/samples/generated_samples/snippet_metadata_aiplatform_v1.json @@ -0,0 +1,30886 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.aiplatform.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-aiplatform" + }, + 
"snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.create_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.CreateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "aiplatform_v1_generated_dataset_service_create_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_CreateDataset_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_create_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.DatasetServiceClient.create_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.CreateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "aiplatform_v1_generated_dataset_service_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_CreateDataset_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_create_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.delete_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.DeleteDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", 
+ "shortName": "DatasetService" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "aiplatform_v1_generated_dataset_service_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_DeleteDataset_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_delete_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.delete_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.DeleteDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + 
"type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "aiplatform_v1_generated_dataset_service_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_DeleteDataset_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_delete_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.export_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ExportData", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" + }, + "description": "Sample for 
ExportData", + "file": "aiplatform_v1_generated_dataset_service_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ExportData_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_export_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.export_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ExportData", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "aiplatform_v1_generated_dataset_service_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ExportData_sync", + "segments": [ + { + "end": 52, + "start": 27, + 
"type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_export_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_GetAnnotationSpec_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_get_annotation_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_GetAnnotationSpec_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_get_annotation_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.get_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "aiplatform_v1_generated_dataset_service_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_GetDataset_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_get_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.get_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.GetDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": 
"DatasetService" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "aiplatform_v1_generated_dataset_service_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_GetDataset_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_get_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.import_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ImportData", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "aiplatform_v1_generated_dataset_service_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ImportData_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_import_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.import_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ImportData", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"import_data" + }, + "description": "Sample for ImportData", + "file": "aiplatform_v1_generated_dataset_service_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ImportData_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_import_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager", + "shortName": "list_annotations" + }, + "description": "Sample for ListAnnotations", + "file": "aiplatform_v1_generated_dataset_service_list_annotations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_DatasetService_ListAnnotations_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_list_annotations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager", + "shortName": "list_annotations" + }, + "description": "Sample for ListAnnotations", + "file": "aiplatform_v1_generated_dataset_service_list_annotations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ListAnnotations_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + 
"start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_list_annotations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_data_items", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDataItems", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDataItems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager", + "shortName": "list_data_items" + }, + "description": "Sample for ListDataItems", + "file": "aiplatform_v1_generated_dataset_service_list_data_items_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ListDataItems_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_dataset_service_list_data_items_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_data_items", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDataItems", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDataItems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager", + "shortName": "list_data_items" + }, + "description": "Sample for ListDataItems", + "file": "aiplatform_v1_generated_dataset_service_list_data_items_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ListDataItems_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_list_data_items_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.DatasetServiceAsyncClient.list_datasets", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDatasets", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "aiplatform_v1_generated_dataset_service_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ListDatasets_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_list_datasets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.list_datasets", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.ListDatasets", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDatasets" + }, 
+ "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "aiplatform_v1_generated_dataset_service_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_ListDatasets_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_list_datasets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceAsyncClient.update_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.UpdateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": 
"google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "aiplatform_v1_generated_dataset_service_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_UpdateDataset_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_update_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DatasetServiceClient.update_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1.DatasetService.UpdateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "aiplatform_v1_generated_dataset_service_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DatasetService_UpdateDataset_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_dataset_service_update_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_endpoint" + }, + "description": 
"Sample for CreateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_endpoint" + }, + "description": "Sample for CreateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_EndpointService_CreateEndpoint_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + 
"start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + 
"async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.EndpointServiceClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + 
"service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.list_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "ListEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager", + "shortName": "list_endpoints" + }, + 
"description": "Sample for ListEndpoints", + "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.list_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "ListEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager", + "shortName": "list_endpoints" + }, + "description": "Sample for ListEndpoints", + "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_sync", + "segments": [ + { + "end": 45, + 
"start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_list_endpoints_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.undeploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UndeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "aiplatform_v1_generated_endpoint_service_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_UndeployModel_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + 
{ + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_undeploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.undeploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UndeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_UndeployModel_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], 
+ "title": "aiplatform_v1_generated_endpoint_service_undeploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.update_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UpdateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_update_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_UpdateEndpoint_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_update_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.update_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UpdateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_UpdateEndpoint_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_update_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.read_feature_values", + 
"method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" + }, + "description": "Sample for StreamingReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" + }, + "description": "Sample for StreamingReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.batch_create_features", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1_generated_featurestore_service_batch_create_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_create_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.batch_create_features", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchCreateFeatures_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_create_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.batch_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, 
+ "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_async", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.batch_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest" + }, + { + "name": 
"featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_BatchReadFeatureValues_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_batch_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": "aiplatform_v1_generated_featurestore_service_create_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateEntityType_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + 
"name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": "aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateEntityType_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature" + }, + "description": "Sample for CreateFeature", + "file": "aiplatform_v1_generated_featurestore_service_create_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeature_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature" + }, + "description": "Sample for CreateFeature", + "file": 
"aiplatform_v1_generated_featurestore_service_create_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeature_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.create_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_featurestore" + }, + "description": "Sample for CreateFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_create_featurestore_async.py", 
+ "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.create_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_featurestore" + }, + "description": "Sample for CreateFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_FeaturestoreService_CreateFeaturestore_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_create_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_entity_type" + }, + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + 
"type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_entity_type" + }, + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteEntityType_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1_generated_featurestore_service_delete_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeature_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + 
"fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1_generated_featurestore_service_delete_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeature_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.delete_featurestore", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_featurestore" + }, + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.delete_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + 
"shortName": "DeleteFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_featurestore" + }, + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_DeleteFeaturestore_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_delete_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.export_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ExportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest" + }, + { + "name": 
"entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_feature_values" + }, + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_export_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_export_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.export_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ExportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } 
+ ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_feature_values" + }, + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ExportFeatureValues_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_export_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "get_entity_type" + }, + "description": "Sample for GetEntityType", + "file": 
"aiplatform_v1_generated_featurestore_service_get_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetEntityType_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "get_entity_type" + }, + "description": "Sample for GetEntityType", + "file": "aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetEntityType_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + 
"end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1_generated_featurestore_service_get_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeature_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + 
"start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1_generated_featurestore_service_get_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeature_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": 
"FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.get_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Featurestore", + "shortName": "get_featurestore" + }, + "description": "Sample for GetFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_get_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.get_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Featurestore", + "shortName": "get_featurestore" + }, + "description": "Sample for GetFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_GetFeaturestore_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_get_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.import_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ImportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_feature_values" + }, + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_import_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_import_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.import_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ImportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_feature_values" + }, + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ImportFeatureValues_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_import_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_entity_types", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListEntityTypes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", + "shortName": 
"list_entity_types" + }, + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1_generated_featurestore_service_list_entity_types_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_entity_types_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_entity_types", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListEntityTypes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesPager", + "shortName": "list_entity_types" + }, + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1_generated_FeaturestoreService_ListEntityTypes_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_entity_types_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1_generated_featurestore_service_list_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeatures_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + 
"end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1_generated_featurestore_service_list_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeatures_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.list_featurestores", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeaturestores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", + "shortName": "list_featurestores" + }, + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1_generated_featurestore_service_list_featurestores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_featurestores_async.py" + }, + { + "canonical": true, + "clientMethod": { + 
"client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.list_featurestores", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeaturestores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresPager", + "shortName": "list_featurestores" + }, + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_ListFeaturestores_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_list_featurestores_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.search_features", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "SearchFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", + "shortName": "search_features" + }, + "description": "Sample for SearchFeatures", + "file": "aiplatform_v1_generated_featurestore_service_search_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_SearchFeatures_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_search_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.search_features", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures", + "service": { 
+ "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "SearchFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesPager", + "shortName": "search_features" + }, + "description": "Sample for SearchFeatures", + "file": "aiplatform_v1_generated_featurestore_service_search_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_SearchFeatures_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_search_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateEntityType" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "update_entity_type" + }, + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1_generated_featurestore_service_update_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest" + }, + { + 
"name": "entity_type", + "type": "google.cloud.aiplatform_v1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.EntityType", + "shortName": "update_entity_type" + }, + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateEntityType_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "update_mask", 
+ "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "update_feature" + }, + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1_generated_featurestore_service_update_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeature_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Feature", + "shortName": "update_feature" + }, + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1_generated_featurestore_service_update_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeature_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient.update_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "update_featurestore" + }, + "description": "Sample for UpdateFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_update_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.FeaturestoreServiceClient.update_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_featurestore" + }, + "description": 
"Sample for UpdateFeaturestore", + "file": "aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_FeaturestoreService_UpdateFeaturestore_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_featurestore_service_update_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.create_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "CreateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index_endpoint" + }, + "description": "Sample for CreateIndexEndpoint", + "file": 
"aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.create_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "CreateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index_endpoint" + }, + "description": "Sample for CreateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.delete_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeleteIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index_endpoint" + }, + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + 
"end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.delete_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeleteIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index_endpoint" + }, + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.deploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_index" + }, + "description": "Sample for DeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.deploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_index" + }, + "description": "Sample for DeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.get_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "GetIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "get_index_endpoint" + }, + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.get_index_endpoint", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "GetIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "get_index_endpoint" + }, + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.list_index_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": 
"ListIndexEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" + }, + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.list_index_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "ListIndexEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + 
"type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" + }, + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.mutate_deployed_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "MutateDeployedIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", 
+ "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "mutate_deployed_index" + }, + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.mutate_deployed_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "MutateDeployedIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "mutate_deployed_index" + }, + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.undeploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UndeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_index" 
+ }, + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.undeploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UndeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_index" + }, + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.update_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UpdateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" + }, + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.update_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UpdateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" + }, + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", + 
"segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.create_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "CreateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index" + }, + "description": "Sample for CreateIndex", + "file": "aiplatform_v1_generated_index_service_create_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_create_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.create_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "CreateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index" + }, + "description": "Sample for CreateIndex", + "file": "aiplatform_v1_generated_index_service_create_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_create_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + 
"async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.delete_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "DeleteIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index" + }, + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1_generated_index_service_delete_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_delete_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.delete_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "DeleteIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index" + }, + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1_generated_index_service_delete_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_delete_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.get_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "GetIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", 
+ "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", + "file": "aiplatform_v1_generated_index_service_get_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_get_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.get_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "GetIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", + "file": "aiplatform_v1_generated_index_service_get_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_IndexService_GetIndex_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_get_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.list_indexes", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "ListIndexes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" + }, + "description": "Sample for ListIndexes", + "file": "aiplatform_v1_generated_index_service_list_indexes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_list_indexes_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.list_indexes", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "ListIndexes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" + }, + "description": "Sample for ListIndexes", + "file": "aiplatform_v1_generated_index_service_list_indexes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_list_indexes_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.update_index", + "method": { + "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "UpdateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_index" + }, + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1_generated_index_service_update_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_update_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.update_index", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.IndexService.UpdateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" + }, + "shortName": "UpdateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_index" + }, + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1_generated_index_service_update_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_index_service_update_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelBatchPredictionJob" + }, 
+ "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" + }, + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + 
], + "shortName": "cancel_batch_prediction_job" + }, + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" + }, + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" 
+ }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" + }, + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + 
"async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" + }, + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" + }, + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" + }, + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" + }, + "description": "Sample for CancelHyperparameterTuningJob", + "file": 
"aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" + }, + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" + }, + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync", + "segments": [ + { + "end": 52, + "start": 
27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" + }, + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1_generated_job_service_create_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" + }, + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1_generated_job_service_create_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_custom_job_sync.py" + }, + { + 
"canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" + }, + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" 
+ }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" + }, + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_hyperparameter_tuning_job", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" + }, + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", + "service": 
{ + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" + }, + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" + }, + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" + }, + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_batch_prediction_job" + }, + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + }, + { + 
"name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_batch_prediction_job" + }, + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "delete_custom_job" + }, + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1_generated_job_service_delete_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_custom_job" + }, + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", + "segments": 
[ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" + }, + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" + }, + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_hyperparameter_tuning_job" + }, + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", + 
"method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_hyperparameter_tuning_job" + }, + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": 
"JobService" + }, + "shortName": "DeleteModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_deployment_monitoring_job" + }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_deployment_monitoring_job" + }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" + }, + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" + }, + "description": "Sample for 
GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" + }, + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" + }, + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_job_service_get_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" + }, + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" + }, + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + 
"shortName": "GetHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" + }, + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" + }, + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" + }, + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "GetModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" + }, + "description": "Sample for 
GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListBatchPredictionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" + }, + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListBatchPredictionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" + }, + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { 
+ "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListCustomJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" + }, + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], 
+ "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListCustomJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" + }, + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListDataLabelingJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" + }, + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": 
"JobService" + }, + "shortName": "ListDataLabelingJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" + }, + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListHyperparameterTuningJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": 
"parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" + }, + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListHyperparameterTuningJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", 
+ "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" + }, + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListModelDeploymentMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" + }, + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ListModelDeploymentMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": 
"list_model_deployment_monitoring_jobs" + }, + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "PauseModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" + }, + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "PauseModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" + }, + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, 
+ { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ResumeModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" + }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "ResumeModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" + }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" + }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", + "segments": [ + { + "end": 46, + "start": 27, + "type": "FULL" + }, + { + "end": 46, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 47, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" + }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", + "segments": [ + { + "end": 46, + "start": 27, + "type": "FULL" + }, + { + "end": 46, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { 
+ "end": 47, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "UpdateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" + }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + 
"start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" + }, + "shortName": "UpdateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" + }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + 
"start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextArtifactsAndExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" + }, + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_artifacts_and_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextArtifactsAndExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" + }, + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" + }, + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1_generated_metadata_service_add_context_children_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_children_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" + }, + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + 
"client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_execution_events", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddExecutionEvents" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" + }, + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient.add_execution_events", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddExecutionEvents" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" + }, + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_artifact", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.CreateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" + }, + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1_generated_metadata_service_create_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + 
"shortName": "CreateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" + }, + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + }, + { 
+ "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" + }, + "description": "Sample for CreateContext", + "file": "aiplatform_v1_generated_metadata_service_create_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" + }, + "description": "Sample for CreateContext", + "file": "aiplatform_v1_generated_metadata_service_create_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" + }, + "description": "Sample for CreateExecution", + "file": "aiplatform_v1_generated_metadata_service_create_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" + }, + "description": "Sample for 
CreateExecution", + "file": "aiplatform_v1_generated_metadata_service_create_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" + }, + "description": "Sample for CreateMetadataSchema", + "file": 
"aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" + }, + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_metadata_store" + }, + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_MetadataService_CreateMetadataStore_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_metadata_store" + }, + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync", + "segments": [ + { + "end": 48, + "start": 27, + 
"type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" + }, + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_artifact" + }, + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" + }, + "description": "Sample for DeleteContext", + "file": "aiplatform_v1_generated_metadata_service_delete_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" + }, + "description": "Sample for DeleteContext", + "file": "aiplatform_v1_generated_metadata_service_delete_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" + }, + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1_generated_metadata_service_delete_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" + }, + "description": "Sample for 
DeleteExecution", + "file": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" + }, + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async", + 
"segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" + }, + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" + }, + "description": "Sample for GetArtifact", + "file": "aiplatform_v1_generated_metadata_service_get_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" + }, + "description": "Sample for GetArtifact", + "file": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" + }, + "description": "Sample for GetContext", + "file": "aiplatform_v1_generated_metadata_service_get_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" + }, + "description": "Sample for GetContext", + "file": "aiplatform_v1_generated_metadata_service_get_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" + }, + "description": "Sample for GetExecution", + "file": 
"aiplatform_v1_generated_metadata_service_get_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" + }, + "description": "Sample for GetExecution", + "file": "aiplatform_v1_generated_metadata_service_get_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 
33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" + }, + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" + }, + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": 
"MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" + }, + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", 
+ "shortName": "MetadataService" + }, + "shortName": "GetMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" + }, + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" + }, + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager", + "shortName": 
"list_artifacts" + }, + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" + }, + "description": "Sample for ListContexts", + "file": "aiplatform_v1_generated_metadata_service_list_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_MetadataService_ListContexts_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_contexts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" + }, + "description": "Sample for ListContexts", + "file": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" + }, + "description": "Sample for ListExecutions", + "file": "aiplatform_v1_generated_metadata_service_list_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_metadata_service_list_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" + }, + "description": "Sample for ListExecutions", + "file": "aiplatform_v1_generated_metadata_service_list_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + 
"fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_schemas", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataSchemas" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" + }, + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_schemas", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataSchemas" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" + }, + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataStores" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataStores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, 
+ { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_artifacts" + }, + "description": "Sample for 
PurgeArtifacts", + "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" + }, + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" + }, + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" + }, + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" + }, + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1_generated_metadata_service_purge_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": 
"PurgeExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" + }, + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryArtifactLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" + }, + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_artifact_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryArtifactLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" + }, + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_context_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryContextLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" + }, + "description": "Sample for 
QueryContextLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_context_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryContextLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" + }, + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryExecutionInputsAndOutputs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" + }, + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", + "segments": [ + { + "end": 44, 
+ "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_execution_inputs_and_outputs", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryExecutionInputsAndOutputs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" + }, + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1_generated_metadata_service_update_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py" + }, + 
{ + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1_generated_metadata_service_update_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, 
+ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1_generated_metadata_service_update_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_execution", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1_generated_metadata_service_update_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, 
+ "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1_generated_metadata_service_update_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": 
"str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" 
+ }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_version" + }, + "description": "Sample for DeleteModelVersion", + "file": 
"aiplatform_v1_generated_model_service_delete_model_version_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_version_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_version" + }, + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1_generated_model_service_delete_model_version_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + 
"type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_version_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1_generated_model_service_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_model_service_delete_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1_generated_model_service_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.export_model", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1_generated_model_service_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_export_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.export_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1_generated_model_service_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + 
"description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_ModelService_GetModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModel_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModel_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" 
+ }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + 
}, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for 
ListModelEvaluations", + "file": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_sync", + 
"segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsAsyncPager", + "shortName": "list_model_versions" + }, + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1_generated_model_service_list_model_versions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_versions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsPager", + "shortName": "list_model_versions" + }, + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1_generated_model_service_list_model_versions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_versions_sync.py" + }, + { + "canonical": true, 
+ "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_models", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "aiplatform_v1_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModels_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_models", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", + "service": { 
+ "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "aiplatform_v1_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModels_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.merge_version_aliases", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + 
{ + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "merge_version_aliases" + }, + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.merge_version_aliases", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "merge_version_aliases" + }, + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + 
"file": "aiplatform_v1_generated_model_service_update_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_update_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + "file": "aiplatform_v1_generated_model_service_update_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_sync", + "segments": [ + { + "end": 47, + "start": 27, + 
"type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_update_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.upload_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UploadModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" + }, + "description": "Sample for UploadModel", + "file": "aiplatform_v1_generated_model_service_upload_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_upload_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.upload_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UploadModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" + }, + "description": "Sample for UploadModel", + "file": "aiplatform_v1_generated_model_service_upload_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_upload_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.cancel_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" + }, + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CancelPipelineJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.cancel_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + 
"shortName": "PipelineService" + }, + "shortName": "CancelPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" + }, + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CancelPipelineJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.cancel_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" + }, + "description": "Sample for CancelTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.cancel_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" + }, + "description": "Sample for CancelTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CancelTrainingPipeline_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_cancel_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.create_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreatePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "create_pipeline_job" + }, + "description": "Sample for CreatePipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CreatePipelineJob_async", + "segments": [ + { + 
"end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.create_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreatePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "create_pipeline_job" + }, + "description": "Sample for CreatePipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CreatePipelineJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_create_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.create_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreateTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "create_training_pipeline" + }, + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.create_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreateTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "create_training_pipeline" + }, + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_CreateTrainingPipeline_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_create_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.delete_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeletePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_pipeline_job" + }, + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_DeletePipelineJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.delete_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeletePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_pipeline_job" + }, + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_DeletePipelineJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.delete_training_pipeline", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeleteTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_training_pipeline" + }, + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.delete_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeleteTrainingPipeline" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_training_pipeline" + }, + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_DeleteTrainingPipeline_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_delete_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.get_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "get_pipeline_job" + }, + "description": "Sample for GetPipelineJob", + "file": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_GetPipelineJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.get_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PipelineJob", + "shortName": "get_pipeline_job" + }, + "description": "Sample for GetPipelineJob", + "file": 
"aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_GetPipelineJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_get_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.get_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "get_training_pipeline" + }, + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_GetTrainingPipeline_async", + "segments": [ + { + "end": 44, 
+ "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.get_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TrainingPipeline", + "shortName": "get_training_pipeline" + }, + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_GetTrainingPipeline_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + 
"start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_get_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient.list_pipeline_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListPipelineJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", + "shortName": "list_pipeline_jobs" + }, + "description": "Sample for ListPipelineJobs", + "file": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_ListPipelineJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_async.py" + }, + { + 
"canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.list_pipeline_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListPipelineJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager", + "shortName": "list_pipeline_jobs" + }, + "description": "Sample for ListPipelineJobs", + "file": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_ListPipelineJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_list_pipeline_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.PipelineServiceAsyncClient.list_training_pipelines", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines", + "service": { + "fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListTrainingPipelines" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", + "shortName": "list_training_pipelines" + }, + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_ListTrainingPipelines_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PipelineServiceClient.list_training_pipelines", + "method": { + "fullName": "google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines", + "service": { + 
"fullName": "google.cloud.aiplatform.v1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListTrainingPipelines" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager", + "shortName": "list_training_pipelines" + }, + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PipelineService_ListTrainingPipelines_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_pipeline_service_list_training_pipelines_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.explain", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Explain", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Explain" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ExplainResponse", + "shortName": "explain" + }, + "description": "Sample for Explain", + "file": "aiplatform_v1_generated_prediction_service_explain_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_Explain_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_explain_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.explain", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Explain", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Explain" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + 
"type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ExplainResponse", + "shortName": "explain" + }, + "description": "Sample for Explain", + "file": "aiplatform_v1_generated_prediction_service_explain_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_Explain_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_explain_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.predict", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": 
"google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "aiplatform_v1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_Predict_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.predict", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "aiplatform_v1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_Predict_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.raw_predict", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.RawPredict", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "RawPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" + }, + "description": "Sample for RawPredict", + "file": 
"aiplatform_v1_generated_prediction_service_raw_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_RawPredict_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_raw_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.raw_predict", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.RawPredict", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "RawPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" + }, + "description": "Sample for RawPredict", + "file": "aiplatform_v1_generated_prediction_service_raw_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_RawPredict_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + 
}, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_raw_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.create_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "CreateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_specialist_pool" + }, + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + 
"start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.create_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "CreateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_specialist_pool" + }, + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "DeleteSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_specialist_pool" + }, + "description": "Sample for DeleteSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + 
], + "title": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.delete_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "DeleteSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_specialist_pool" + }, + "description": "Sample for DeleteSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_delete_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.get_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "GetSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.SpecialistPool", + "shortName": "get_specialist_pool" + }, + "description": "Sample for GetSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.get_specialist_pool", + 
"method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "GetSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.SpecialistPool", + "shortName": "get_specialist_pool" + }, + "description": "Sample for GetSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_GetSpecialistPool_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_get_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.list_specialist_pools", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + 
"shortName": "SpecialistPoolService" + }, + "shortName": "ListSpecialistPools" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", + "shortName": "list_specialist_pools" + }, + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.list_specialist_pools", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "ListSpecialistPools" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", + "shortName": "list_specialist_pools" + }, + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_ListSpecialistPools_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_list_specialist_pools_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceAsyncClient.update_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "UpdateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": 
"google.cloud.aiplatform_v1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_specialist_pool" + }, + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.SpecialistPoolServiceClient.update_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "UpdateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1.types.SpecialistPool" + }, 
+ { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_specialist_pool" + }, + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_specialist_pool_service_update_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": 
"Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" + }, + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_create_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": 
"Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" + }, + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": 
"requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" + }, + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": 
"parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" + }, + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" + }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" + }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" + }, + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardExperiment" + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" + }, + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardExperiment_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": 
"CreateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "create_tensorboard_run" + }, + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardRun" + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "create_tensorboard_run" + }, + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardRun_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" + }, + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" + }, + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.create_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_tensorboard" + }, + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboard_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.create_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": 
"google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_tensorboard" + }, + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_CreateTensorboard_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_create_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_experiment" + }, + "description": "Sample for DeleteTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_experiment" + }, + 
"description": "Sample for DeleteTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardExperiment_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_run" + }, + "description": "Sample for DeleteTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_run" + }, + "description": "Sample for DeleteTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardRun_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + 
}, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_time_series" + }, + "description": "Sample for DeleteTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_time_series" + }, + "description": "Sample for DeleteTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + 
"title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.delete_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard" + }, + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboard_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.delete_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard" + }, + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_DeleteTensorboard_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_delete_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ExportTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", + "shortName": "export_tensorboard_time_series_data" + }, + "description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.export_tensorboard_time_series_data", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ExportTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", + "shortName": "export_tensorboard_time_series_data" + }, + "description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_experiment", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" + }, + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": 
"TensorboardService" + }, + "shortName": "GetTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" + }, + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardExperiment_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "get_tensorboard_run" + }, + "description": "Sample for GetTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardRun_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "get_tensorboard_run" + }, + "description": "Sample for GetTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardRun_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" + }, + "description": "Sample 
for GetTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" + }, + "description": "Sample for GetTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", 
+ "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboardTimeSeries_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.get_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Tensorboard", + "shortName": "get_tensorboard" + }, + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboard_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 
31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.get_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.GetTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Tensorboard", + "shortName": "get_tensorboard" + }, + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_GetTensorboard_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_tensorboard_service_get_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_experiments", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardExperiments" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", + "shortName": "list_tensorboard_experiments" + }, + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_async.py" + }, + { + 
"canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_experiments", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardExperiments" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", + "shortName": "list_tensorboard_experiments" + }, + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardExperiments_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", + "shortName": "list_tensorboard_runs" + }, + "description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsPager", + "shortName": "list_tensorboard_runs" + }, + "description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardRuns_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_runs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboard_time_series", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", + "shortName": "list_tensorboard_time_series" + }, + "description": "Sample for ListTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", + "shortName": "list_tensorboard_time_series" + }, + "description": "Sample for ListTensorboardTimeSeries", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboardTimeSeries_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.list_tensorboards", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboards", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": 
"ListTensorboards" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", + "shortName": "list_tensorboards" + }, + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboards_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.list_tensorboards", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ListTensorboards", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboards" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsPager", + "shortName": "list_tensorboards" + }, + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ListTensorboards_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_list_tensorboards_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardBlobData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" + }, + "description": "Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.read_tensorboard_blob_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardBlobData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" + }, + "description": 
"Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" + }, + "description": "Sample for ReadTensorboardTimeSeriesData", + "file": 
"aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" + }, + "description": "Sample for ReadTensorboardTimeSeriesData", + "file": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" + }, + "description": "Sample for UpdateTensorboardExperiment", + "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" + }, + "description": "Sample for UpdateTensorboardExperiment", + "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardExperiment_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "update_tensorboard_run" + }, + "description": "Sample for UpdateTensorboardRun", + "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardRun", + "shortName": "update_tensorboard_run" + }, + "description": "Sample for UpdateTensorboardRun", + "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardRun_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" + }, + "description": "Sample for UpdateTensorboardTimeSeries", + "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" + }, + "description": "Sample for UpdateTensorboardTimeSeries", + 
"file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.update_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_tensorboard" + }, + "description": "Sample for UpdateTensorboard", + "file": 
"aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboard_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.update_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_tensorboard" + }, + "description": "Sample for UpdateTensorboard", + "file": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1_generated_TensorboardService_UpdateTensorboard_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_update_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardExperimentData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" + }, + "description": "Sample for WriteTensorboardExperimentData", + "file": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.write_tensorboard_experiment_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardExperimentData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" + }, + "description": "Sample for WriteTensorboardExperimentData", + "file": 
"aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardExperimentData_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceAsyncClient.write_tensorboard_run_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardRunData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" + }, + "description": "Sample for WriteTensorboardRunData", + "file": 
"aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.TensorboardServiceClient.write_tensorboard_run_data", + "method": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData", + "service": { + "fullName": "google.cloud.aiplatform.v1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardRunData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" + }, + "description": "Sample for WriteTensorboardRunData", + "file": 
"aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_TensorboardService_WriteTensorboardRunData_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.add_trial_measurement", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "AddTrialMeasurement" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "add_trial_measurement" + }, + "description": "Sample for AddTrialMeasurement", + "file": "aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_AddTrialMeasurement_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" 
+ }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_add_trial_measurement_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.add_trial_measurement", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "AddTrialMeasurement" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "add_trial_measurement" + }, + "description": "Sample for AddTrialMeasurement", + "file": "aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_AddTrialMeasurement_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + 
} + ], + "title": "aiplatform_v1_generated_vizier_service_add_trial_measurement_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.check_trial_early_stopping_state", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CheckTrialEarlyStoppingState" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "check_trial_early_stopping_state" + }, + "description": "Sample for CheckTrialEarlyStoppingState", + "file": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.check_trial_early_stopping_state", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CheckTrialEarlyStoppingState" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "check_trial_early_stopping_state" + }, + "description": "Sample for CheckTrialEarlyStoppingState", + "file": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CheckTrialEarlyStoppingState_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_check_trial_early_stopping_state_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.complete_trial", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.VizierService.CompleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CompleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "complete_trial" + }, + "description": "Sample for CompleteTrial", + "file": "aiplatform_v1_generated_vizier_service_complete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CompleteTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_complete_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.complete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CompleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CompleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { 
+ "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "complete_trial" + }, + "description": "Sample for CompleteTrial", + "file": "aiplatform_v1_generated_vizier_service_complete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CompleteTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_complete_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.create_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "create_study" + }, + "description": "Sample for 
CreateStudy", + "file": "aiplatform_v1_generated_vizier_service_create_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CreateStudy_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_create_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.create_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "create_study" + }, + "description": "Sample for CreateStudy", + "file": "aiplatform_v1_generated_vizier_service_create_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CreateStudy_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, 
+ { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_create_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.create_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "create_trial" + }, + "description": "Sample for CreateTrial", + "file": "aiplatform_v1_generated_vizier_service_create_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CreateTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_create_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.create_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.CreateTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "create_trial" + }, + "description": "Sample for CreateTrial", + "file": "aiplatform_v1_generated_vizier_service_create_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_CreateTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_create_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.delete_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" + }, + "description": "Sample for DeleteStudy", + "file": "aiplatform_v1_generated_vizier_service_delete_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_DeleteStudy_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_delete_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.delete_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteStudy" + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" + }, + "description": "Sample for DeleteStudy", + "file": "aiplatform_v1_generated_vizier_service_delete_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_DeleteStudy_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_delete_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.delete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" + }, + "description": "Sample for DeleteTrial", + "file": 
"aiplatform_v1_generated_vizier_service_delete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_DeleteTrial_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_delete_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.delete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.DeleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" + }, + "description": "Sample for DeleteTrial", + "file": "aiplatform_v1_generated_vizier_service_delete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_DeleteTrial_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_delete_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.get_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "get_study" + }, + "description": "Sample for GetStudy", + "file": "aiplatform_v1_generated_vizier_service_get_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_GetStudy_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_get_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": 
"VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.get_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "get_study" + }, + "description": "Sample for GetStudy", + "file": "aiplatform_v1_generated_vizier_service_get_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_GetStudy_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_get_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.get_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetTrial" + }, + "parameters": [ + { + "name": 
"request", + "type": "google.cloud.aiplatform_v1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "get_trial" + }, + "description": "Sample for GetTrial", + "file": "aiplatform_v1_generated_vizier_service_get_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_GetTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_get_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.get_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.GetTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": 
"get_trial" + }, + "description": "Sample for GetTrial", + "file": "aiplatform_v1_generated_vizier_service_get_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_GetTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_get_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_optimal_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListOptimalTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListOptimalTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" + }, + "description": "Sample for ListOptimalTrials", + "file": "aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListOptimalTrials_async", + 
"segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_optimal_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.list_optimal_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListOptimalTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListOptimalTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" + }, + "description": "Sample for ListOptimalTrials", + "file": "aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListOptimalTrials_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + 
"start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_optimal_trials_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_studies", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListStudies", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListStudies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesAsyncPager", + "shortName": "list_studies" + }, + "description": "Sample for ListStudies", + "file": "aiplatform_v1_generated_vizier_service_list_studies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListStudies_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_studies_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.list_studies", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListStudies", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListStudies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesPager", + "shortName": "list_studies" + }, + "description": "Sample for ListStudies", + "file": "aiplatform_v1_generated_vizier_service_list_studies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListStudies_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_studies_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.list_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListTrials", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsAsyncPager", + "shortName": "list_trials" + }, + "description": "Sample for ListTrials", + "file": "aiplatform_v1_generated_vizier_service_list_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListTrials_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.list_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.ListTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" 
+ }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsPager", + "shortName": "list_trials" + }, + "description": "Sample for ListTrials", + "file": "aiplatform_v1_generated_vizier_service_list_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_ListTrials_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_list_trials_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.lookup_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.LookupStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "LookupStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "lookup_study" + }, + "description": "Sample for LookupStudy", + "file": 
"aiplatform_v1_generated_vizier_service_lookup_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_LookupStudy_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_lookup_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.lookup_study", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.LookupStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "LookupStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Study", + "shortName": "lookup_study" + }, + "description": "Sample for LookupStudy", + "file": "aiplatform_v1_generated_vizier_service_lookup_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_LookupStudy_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_lookup_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.stop_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.StopTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "StopTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "stop_trial" + }, + "description": "Sample for StopTrial", + "file": "aiplatform_v1_generated_vizier_service_stop_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_StopTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_stop_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.stop_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.StopTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "StopTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Trial", + "shortName": "stop_trial" + }, + "description": "Sample for StopTrial", + "file": "aiplatform_v1_generated_vizier_service_stop_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_StopTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_stop_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceAsyncClient.suggest_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.SuggestTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "SuggestTrials" 
+ }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "suggest_trials" + }, + "description": "Sample for SuggestTrials", + "file": "aiplatform_v1_generated_vizier_service_suggest_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_SuggestTrials_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 40, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_suggest_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.VizierServiceClient.suggest_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1.VizierService.SuggestTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1.VizierService", + "shortName": "VizierService" + }, + "shortName": "SuggestTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"suggest_trials" + }, + "description": "Sample for SuggestTrials", + "file": "aiplatform_v1_generated_vizier_service_suggest_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_VizierService_SuggestTrials_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 40, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_vizier_service_suggest_trials_sync.py" + } + ] +} diff --git a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py new file mode 100644 index 0000000000..9b1d7e6e70 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py @@ -0,0 +1,363 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class aiplatformCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ), + 'add_context_children': ('context', 'child_contexts', ), + 'add_execution_events': ('execution', 'events', ), + 'add_trial_measurement': ('trial_name', 'measurement', ), + 'batch_create_features': ('parent', 'requests', ), + 'batch_create_tensorboard_runs': ('parent', 'requests', ), + 'batch_create_tensorboard_time_series': ('parent', 'requests', ), + 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), + 'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', 'bigquery_read_instances', 'pass_through_fields', ), + 'batch_read_tensorboard_time_series_data': ('tensorboard', 'time_series', ), + 'cancel_batch_prediction_job': ('name', ), + 'cancel_custom_job': ('name', ), + 'cancel_data_labeling_job': ('name', ), + 'cancel_hyperparameter_tuning_job': ('name', ), + 'cancel_pipeline_job': ('name', ), + 'cancel_training_pipeline': ('name', ), + 'check_trial_early_stopping_state': ('trial_name', ), + 'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ), + 'create_artifact': ('parent', 'artifact', 'artifact_id', ), + 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), + 'create_context': ('parent', 'context', 'context_id', ), + 'create_custom_job': ('parent', 
'custom_job', ), + 'create_data_labeling_job': ('parent', 'data_labeling_job', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_endpoint': ('parent', 'endpoint', 'endpoint_id', ), + 'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ), + 'create_execution': ('parent', 'execution', 'execution_id', ), + 'create_feature': ('parent', 'feature', 'feature_id', ), + 'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ), + 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), + 'create_index': ('parent', 'index', ), + 'create_index_endpoint': ('parent', 'index_endpoint', ), + 'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ), + 'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ), + 'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ), + 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), + 'create_specialist_pool': ('parent', 'specialist_pool', ), + 'create_study': ('parent', 'study', ), + 'create_tensorboard': ('parent', 'tensorboard', ), + 'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ), + 'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ), + 'create_tensorboard_time_series': ('parent', 'tensorboard_time_series', 'tensorboard_time_series_id', ), + 'create_training_pipeline': ('parent', 'training_pipeline', ), + 'create_trial': ('parent', 'trial', ), + 'delete_artifact': ('name', 'etag', ), + 'delete_batch_prediction_job': ('name', ), + 'delete_context': ('name', 'force', 'etag', ), + 'delete_custom_job': ('name', ), + 'delete_data_labeling_job': ('name', ), + 'delete_dataset': ('name', ), + 'delete_endpoint': ('name', ), + 'delete_entity_type': ('name', 'force', ), + 'delete_execution': ('name', 'etag', ), + 'delete_feature': ('name', ), + 'delete_featurestore': ('name', 'force', ), + 
'delete_hyperparameter_tuning_job': ('name', ), + 'delete_index': ('name', ), + 'delete_index_endpoint': ('name', ), + 'delete_metadata_store': ('name', 'force', ), + 'delete_model': ('name', ), + 'delete_model_deployment_monitoring_job': ('name', ), + 'delete_model_version': ('name', ), + 'delete_pipeline_job': ('name', ), + 'delete_specialist_pool': ('name', 'force', ), + 'delete_study': ('name', ), + 'delete_tensorboard': ('name', ), + 'delete_tensorboard_experiment': ('name', ), + 'delete_tensorboard_run': ('name', ), + 'delete_tensorboard_time_series': ('name', ), + 'delete_training_pipeline': ('name', ), + 'delete_trial': ('name', ), + 'deploy_index': ('index_endpoint', 'deployed_index', ), + 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), + 'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ), + 'export_data': ('name', 'export_config', ), + 'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'full_export', 'settings', ), + 'export_model': ('name', 'output_config', ), + 'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ), + 'get_annotation_spec': ('name', 'read_mask', ), + 'get_artifact': ('name', ), + 'get_batch_prediction_job': ('name', ), + 'get_context': ('name', ), + 'get_custom_job': ('name', ), + 'get_data_labeling_job': ('name', ), + 'get_dataset': ('name', 'read_mask', ), + 'get_endpoint': ('name', ), + 'get_entity_type': ('name', ), + 'get_execution': ('name', ), + 'get_feature': ('name', ), + 'get_featurestore': ('name', ), + 'get_hyperparameter_tuning_job': ('name', ), + 'get_index': ('name', ), + 'get_index_endpoint': ('name', ), + 'get_metadata_schema': ('name', ), + 'get_metadata_store': ('name', ), + 'get_model': ('name', ), + 'get_model_deployment_monitoring_job': ('name', ), + 'get_model_evaluation': ('name', ), + 'get_model_evaluation_slice': ('name', ), + 
'get_pipeline_job': ('name', ), + 'get_specialist_pool': ('name', ), + 'get_study': ('name', ), + 'get_tensorboard': ('name', ), + 'get_tensorboard_experiment': ('name', ), + 'get_tensorboard_run': ('name', ), + 'get_tensorboard_time_series': ('name', ), + 'get_training_pipeline': ('name', ), + 'get_trial': ('name', ), + 'import_data': ('name', 'import_configs', ), + 'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', 'disable_ingestion_analysis', ), + 'import_model_evaluation': ('parent', 'model_evaluation', ), + 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_entity_types': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_executions': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_features': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ), + 'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_index_endpoints': 
('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_metadata_stores': ('parent', 'page_size', 'page_token', ), + 'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_model_versions': ('name', 'page_size', 'page_token', 'filter', 'read_mask', ), + 'list_optimal_trials': ('parent', ), + 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', ), + 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), + 'list_studies': ('parent', 'page_token', 'page_size', ), + 'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_trials': ('parent', 'page_token', 'page_size', ), + 'lookup_study': ('parent', 'display_name', ), + 'merge_version_aliases': ('name', 'version_aliases', ), + 'mutate_deployed_index': ('index_endpoint', 'deployed_index', ), + 'pause_model_deployment_monitoring_job': ('name', ), + 'predict': ('endpoint', 'instances', 'parameters', ), + 'purge_artifacts': ('parent', 'filter', 'force', ), + 'purge_contexts': ('parent', 'filter', 'force', ), + 
'purge_executions': ('parent', 'filter', 'force', ), + 'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ), + 'query_context_lineage_subgraph': ('context', ), + 'query_execution_inputs_and_outputs': ('execution', ), + 'raw_predict': ('endpoint', 'http_body', ), + 'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ), + 'read_tensorboard_blob_data': ('time_series', 'blob_ids', ), + 'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ), + 'resume_model_deployment_monitoring_job': ('name', ), + 'search_features': ('location', 'query', 'page_size', 'page_token', ), + 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), + 'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ), + 'stop_trial': ('name', ), + 'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ), + 'suggest_trials': ('parent', 'suggestion_count', 'client_id', ), + 'undeploy_index': ('index_endpoint', 'deployed_index_id', ), + 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), + 'update_artifact': ('artifact', 'update_mask', 'allow_missing', ), + 'update_context': ('context', 'update_mask', 'allow_missing', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_endpoint': ('endpoint', 'update_mask', ), + 'update_entity_type': ('entity_type', 'update_mask', ), + 'update_execution': ('execution', 'update_mask', 'allow_missing', ), + 'update_feature': ('feature', 'update_mask', ), + 'update_featurestore': ('featurestore', 'update_mask', ), + 'update_index': ('index', 'update_mask', ), + 'update_index_endpoint': ('index_endpoint', 'update_mask', ), + 'update_model': ('model', 'update_mask', ), + 'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ), + 
'update_specialist_pool': ('specialist_pool', 'update_mask', ), + 'update_tensorboard': ('update_mask', 'tensorboard', ), + 'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ), + 'update_tensorboard_run': ('update_mask', 'tensorboard_run', ), + 'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ), + 'upload_model': ('parent', 'model', 'parent_model', 'model_id', ), + 'write_tensorboard_experiment_data': ('tensorboard_experiment', 'write_run_data_requests', ), + 'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=aiplatformCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the aiplatform client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py new file mode 100644 index 0000000000..407ff3a88e --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class definitionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=definitionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the definition client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py new file mode 100644 index 0000000000..907918ad25 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py new file mode 100644 index 0000000000..b868205c2b --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=paramsCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py new file mode 100644 index 0000000000..69756d1b09 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 0000000000..012e5bc408 --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-trainingjob-definition', + author="Google LLC", + author_email="googleapis-packages@google.com", + url="https://github.com/googleapis/python-google-cloud-aiplatform-v1-schema-trainingjob-definition", + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.trainingjob'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.31.0, < 3.0.0dev', + 'libcst >= 0.2.5', + 'googleapis-common-protos >= 1.55.0, <2.0.0dev', + 'proto-plus >= 1.19.7', + ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py new file mode 100644 index 0000000000..8e9b43f5a0 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -0,0 +1,5583 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceClient +from google.cloud.aiplatform_v1.services.dataset_service import pagers +from google.cloud.aiplatform_v1.services.dataset_service import transports +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from 
google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DatasetServiceClient._get_default_mtls_endpoint(None) is None + assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + 
+@pytest.mark.parametrize("client_class,transport_name", [ + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), +]) +def test_dataset_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DatasetServiceGrpcTransport, "grpc"), + (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), +]) +def test_dataset_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = 
client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_dataset_service_client_get_transport_class(): + transport = DatasetServiceClient.get_transport_class() + available_transports = [ + transports.DatasetServiceGrpcTransport, + ] + assert transport in available_transports + + transport = DatasetServiceClient.get_transport_class("grpc") + assert transport == transports.DatasetServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, DatasetServiceAsyncClient +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", grpc_helpers), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_dataset_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = DatasetServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", grpc_helpers), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_dataset_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.CreateDatasetRequest, + dict, +]) +def test_create_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + + +def test_create_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.GetDatasetRequest, + dict, +]) +def test_get_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListDatasetsRequest, + dict, +]) +def test_list_datasets(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDatasetsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_datasets_async_from_dict(): + await test_list_datasets_async(request_type=dict) + + +def test_list_datasets_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = dataset_service.ListDatasetsResponse() + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_datasets_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_datasets_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = dataset_service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_pager(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_datasets(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) +def test_list_datasets_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = list(client.list_datasets(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_datasets_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_datasets(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_datasets_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_datasets(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + dataset_service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = dataset_service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ImportDataRequest, + dict, +]) +def test_import_data(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].import_configs + mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] + assert arg == mock_val + + +def test_import_data_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + dataset_service.ImportDataRequest(), + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + +@pytest.mark.asyncio +async def test_import_data_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].import_configs + mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_data_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_data( + dataset_service.ImportDataRequest(), + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ExportDataRequest, + dict, +]) +def test_export_data(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_data_async_from_dict(): + await test_export_data_async(request_type=dict) + + +def test_export_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_data( + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].export_config + mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_data_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + dataset_service.ExportDataRequest(), + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_data_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_data( + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].export_config + mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_data_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_data( + dataset_service.ExportDataRequest(), + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListDataItemsRequest, + dict, +]) +def test_list_data_items(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = dataset_service.ListDataItemsResponse(
            next_page_token='next_page_token_value',
        )
        response = client.list_data_items(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dataset_service.ListDataItemsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListDataItemsPager)
    assert response.next_page_token == 'next_page_token_value'


def test_list_data_items_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        client.list_data_items()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == dataset_service.ListDataItemsRequest()

@pytest.mark.asyncio
async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest):
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.list_data_items(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == dataset_service.ListDataItemsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListDataItemsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'


@pytest.mark.asyncio
async def test_list_data_items_async_from_dict():
    await test_list_data_items_async(request_type=dict)


def test_list_data_items_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dataset_service.ListDataItemsRequest()

    request.parent = 'parent_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        call.return_value = dataset_service.ListDataItemsResponse()
        client.list_data_items(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_list_data_items_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dataset_service.ListDataItemsRequest()

    request.parent = 'parent_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse())
        await client.list_data_items(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent_value',
    ) in kw['metadata']


def test_list_data_items_flattened():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_data_items),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = dataset_service.ListDataItemsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_data_items(
            parent='parent_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_data_items_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_data_items( + dataset_service.ListDataItemsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_data_items_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_data_items( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_data_items_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_data_items( + dataset_service.ListDataItemsRequest(), + parent='parent_value', + ) + + +def test_list_data_items_pager(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_data_items(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, data_item.DataItem) + for i in results) +def test_list_data_items_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + pages = list(client.list_data_items(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_data_items_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_data_items(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, data_item.DataItem) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_data_items_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_data_items(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + dataset_service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + )) + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async_from_dict(): + await test_get_annotation_spec_async(request_type=dict) + + +def test_get_annotation_spec_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = annotation_spec.AnnotationSpec() + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_annotation_spec_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_annotation_spec_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_annotation_spec_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + dataset_service.GetAnnotationSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_annotation_spec( + dataset_service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListAnnotationsRequest, + dict, +]) +def test_list_annotations(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = dataset_service.ListAnnotationsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAnnotationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_annotations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + client.list_annotations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + +@pytest.mark.asyncio +async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAnnotationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_annotations_async_from_dict(): + await test_list_annotations_async(request_type=dict) + + +def test_list_annotations_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = dataset_service.ListAnnotationsResponse() + client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_annotations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) + await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_annotations_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListAnnotationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_annotations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_annotations_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_annotations( + dataset_service.ListAnnotationsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_annotations_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListAnnotationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_annotations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_annotations_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_annotations( + dataset_service.ListAnnotationsRequest(), + parent='parent_value', + ) + + +def test_list_annotations_pager(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_annotations(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, annotation.Annotation) + for i in results) +def test_list_annotations_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_annotations(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_annotations_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_annotations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, annotation.Annotation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_annotations_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_annotations(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DatasetServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DatasetServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = DatasetServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DatasetServiceGrpcTransport, + ) + +def test_dataset_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_dataset_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_dataset', + 'get_dataset', + 'update_dataset', + 'list_datasets', + 'delete_dataset', + 'import_data', + 'export_data', + 'list_data_items', + 'get_annotation_spec', + 'list_annotations', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_dataset_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_dataset_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport() + adc.assert_called_once() + + +def test_dataset_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DatasetServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, + ], +) +def test_dataset_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.DatasetServiceGrpcTransport, grpc_helpers), + (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_dataset_service_host_no_port(transport_name): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_dataset_service_host_with_port(transport_name): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_dataset_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DatasetServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_dataset_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.DatasetServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_dataset_service_grpc_lro_client(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_dataset_service_grpc_lro_async_client(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotation_path(): + project = "squid" + location = "clam" + dataset = "whelk" + data_item = "octopus" + annotation = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) + actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) + assert expected == actual + + +def test_parse_annotation_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + "data_item": "winkle", + "annotation": "nautilus", + } + path = DatasetServiceClient.annotation_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_annotation_path(path) + assert expected == actual + +def test_annotation_spec_path(): + project = "scallop" + location = "abalone" + dataset = "squid" + annotation_spec = "clam" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) + assert expected == actual + + +def test_parse_annotation_spec_path(): + expected = { + "project": "whelk", + "location": "octopus", + "dataset": "oyster", + "annotation_spec": "nudibranch", + } + path = DatasetServiceClient.annotation_spec_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_annotation_spec_path(path) + assert expected == actual + +def test_data_item_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + data_item = "nautilus" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) + actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) + assert expected == actual + + +def test_parse_data_item_path(): + expected = { + "project": "scallop", + "location": "abalone", + "dataset": "squid", + "data_item": "clam", + } + path = DatasetServiceClient.data_item_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_data_item_path(path) + assert expected == actual + +def test_dataset_path(): + project = "whelk" + location = "octopus" + dataset = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = DatasetServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + } + path = DatasetServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = DatasetServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = DatasetServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = DatasetServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = DatasetServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DatasetServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = DatasetServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = DatasetServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = DatasetServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DatasetServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = DatasetServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = DatasetServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+        assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py new file mode 100644 index 0000000000..58c16743b9 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -0,0 +1,4536 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient +from google.cloud.aiplatform_v1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1.services.endpoint_service import transports +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import explanation_metadata +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import 
locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert EndpointServiceClient._get_default_mtls_endpoint(None) is None + assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_endpoint_service_client_from_service_account_info(client_class, transport_name): + creds = 
ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.EndpointServiceGrpcTransport, "grpc"), + (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_endpoint_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + 
assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_endpoint_service_client_get_transport_class(): + transport = EndpointServiceClient.get_transport_class() + available_transports = [ + transports.EndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = EndpointServiceClient.get_transport_class("grpc") + assert transport == transports.EndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, EndpointServiceAsyncClient +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", grpc_helpers), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = EndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", grpc_helpers), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_endpoint_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.CreateEndpointRequest, + dict, +]) +def test_create_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + client.create_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + +@pytest.mark.asyncio +async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_endpoint_async_from_dict(): + await test_create_endpoint_async(request_type=dict) + + +def test_create_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_endpoint( + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].endpoint_id + mock_val = 'endpoint_id_value' + assert arg == mock_val + + +def test_create_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_endpoint( + endpoint_service.CreateEndpointRequest(), + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + +@pytest.mark.asyncio +async def test_create_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_endpoint( + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].endpoint_id + mock_val = 'endpoint_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_endpoint( + endpoint_service.CreateEndpointRequest(), + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.GetEndpointRequest, + dict, +]) +def test_get_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + ) + response = client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +def test_get_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + client.get_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + +@pytest.mark.asyncio +async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + )) + response = await client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +@pytest.mark.asyncio +async def test_get_endpoint_async_from_dict(): + await test_get_endpoint_async(request_type=dict) + + +def test_get_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.GetEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + call.return_value = endpoint.Endpoint() + client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.GetEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) + await client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_endpoint( + endpoint_service.GetEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_endpoint( + endpoint_service.GetEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.ListEndpointsRequest, + dict, +]) +def test_list_endpoints(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEndpointsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_endpoints_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + client.list_endpoints() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + +@pytest.mark.asyncio +async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_endpoints_async_from_dict(): + await test_list_endpoints_async(request_type=dict) + + +def test_list_endpoints_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = endpoint_service.ListEndpointsResponse() + client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_endpoints_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) + await client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_endpoints_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_endpoints_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_endpoints( + endpoint_service.ListEndpointsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_endpoints_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_endpoints_flattened_error_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_endpoints(
+ endpoint_service.ListEndpointsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_endpoints_pager(transport_name: str = "grpc"):
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_endpoints),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ next_page_token='abc',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[],
+ next_page_token='def',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ ],
+ next_page_token='ghi',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_endpoints(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, endpoint.Endpoint)
+ for i in results)
+def test_list_endpoints_pages(transport_name: str = "grpc"):
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the
gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_endpoints),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ next_page_token='abc',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[],
+ next_page_token='def',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ ],
+ next_page_token='ghi',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_endpoints(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pager():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_endpoints),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ next_page_token='abc',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[],
+ next_page_token='def',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ ],
+ next_page_token='ghi',
+ ),
+ endpoint_service.ListEndpointsResponse(
+ endpoints=[
+ endpoint.Endpoint(),
+ endpoint.Endpoint(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_endpoints(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, endpoint.Endpoint)
+ for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pages():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_endpoints),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + next_page_token='abc', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[], + next_page_token='def', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + ], + next_page_token='ghi', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_endpoints(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + endpoint_service.UpdateEndpointRequest, + dict, +]) +def test_update_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + ) + response = client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +def test_update_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + client.update_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + +@pytest.mark.asyncio +async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + )) + response = await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +@pytest.mark.asyncio +async def test_update_endpoint_async_from_dict(): + await test_update_endpoint_async(request_type=dict) + + +def test_update_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UpdateEndpointRequest() + + request.endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = gca_endpoint.Endpoint() + client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UpdateEndpointRequest() + + request.endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=name_value', + ) in kw['metadata'] + + +def test_update_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.DeleteEndpointRequest, + dict, +]) +def test_delete_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + client.delete_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + +@pytest.mark.asyncio +async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_endpoint_async_from_dict(): + await test_delete_endpoint_async(request_type=dict) + + +def test_delete_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.DeployModelRequest, + dict, +]) +def test_deploy_model(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_deploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model + mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + + +def test_deploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model + mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.UndeployModelRequest, + dict, +]) +def test_undeploy_model(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + + +def test_undeploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.undeploy_model( + endpoint_service.UndeployModelRequest(), + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.undeploy_model( + endpoint_service.UndeployModelRequest(), + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = EndpointServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.EndpointServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = EndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.EndpointServiceGrpcTransport, + ) + +def test_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_endpoint', + 'get_endpoint', + 'list_endpoints', + 'update_endpoint', + 'delete_endpoint', + 'deploy_model', + 'undeploy_model', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with 
mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport() + adc.assert_called_once() + + +def test_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +def test_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.EndpointServiceGrpcTransport, grpc_helpers), + (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_endpoint_service_host_no_port(transport_name): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_endpoint_service_host_with_port(transport_name): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_endpoint_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.EndpointServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_endpoint_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.EndpointServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_service_grpc_lro_client(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = 
client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_service_grpc_lro_async_client(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = EndpointServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = EndpointServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = EndpointServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = EndpointServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_deployment_monitoring_job_path(): + project = "squid" + location = "clam" + model_deployment_monitoring_job = "whelk" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + actual = EndpointServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model_deployment_monitoring_job": "nudibranch", + } + path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = EndpointServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = EndpointServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_network_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = EndpointServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = EndpointServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = EndpointServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = EndpointServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = EndpointServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = EndpointServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = EndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = EndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = EndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = EndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = EndpointServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py new file mode 100644 index 0000000000..ee828ae418 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py @@ -0,0 +1,2953 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import transports +from google.cloud.aiplatform_v1.types import feature_selector +from google.cloud.aiplatform_v1.types import featurestore_online_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client):
    """Return a non-localhost endpoint so mTLS endpoint derivation can be tested."""
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their `.mtls.` variants."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None
    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class,transport_name", [
    (FeaturestoreOnlineServingServiceClient, "grpc"),
    (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_from_service_account_info(client_class, transport_name):
    """from_service_account_info builds a client using the factory's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == (
            'aiplatform.googleapis.com:443'
        )


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
    (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_service_account_always_use_jwt(transport_class, transport_name):
    """always_use_jwt_access toggles with_always_use_jwt_access on SA credentials."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class,transport_name", [
    (FeaturestoreOnlineServingServiceClient, "grpc"),
    (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_from_service_account_file(client_class, transport_name):
    """from_service_account_file/json build clients using the factory's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == (
            'aiplatform.googleapis.com:443'
        )


def test_featurestore_online_serving_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport by default and by name."""
    transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
    available_transports = [
        transports.FeaturestoreOnlineServingServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
    assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name):
    """Client options (endpoint, mTLS env vars, quota project) reach the transport."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def
test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """Endpoint/cert autoswitching driven by GOOGLE_API_USE_CLIENT_CERTIFICATE."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        # NOTE(review): `client` here is the instance left over from
                        # the previous with-block (generated-code quirk); only its
                        # DEFAULT_ENDPOINT attribute is read — confirm if refactoring.
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )


@pytest.mark.parametrize("client_class", [
    FeaturestoreOnlineServingServiceClient, FeaturestoreOnlineServingServiceAsyncClient
])
@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source honors the cert/endpoint env vars."""
    mock_client_cert_source = mock.Mock()

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
            with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source):
                api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes from client options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", grpc_helpers),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
    """credentials_file from client options is passed through to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )

    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

def test_featurestore_online_serving_service_client_client_options_from_dict():
    """A dict of client options behaves like a ClientOptions instance."""
    with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = FeaturestoreOnlineServingServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", grpc_helpers),
    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_featurestore_online_serving_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
    """Credentials loaded from file are the ones used to create the gRPC channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )

    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
),
            scopes=None,
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )


@pytest.mark.parametrize("request_type", [
    featurestore_online_service.ReadFeatureValuesRequest,
    dict,
])
def test_read_feature_values(request_type, transport: str = 'grpc'):
    """ReadFeatureValues returns the expected response type for proto and dict requests."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty
    # request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse(
        )
        response = client.read_feature_values(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)


def test_read_feature_values_empty_call():
    """Calling with no request still sends the default request message."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        client.read_feature_values()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()

@pytest.mark.asyncio
async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest):
    """Async ReadFeatureValues returns the expected response type."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse(
        ))
        response = await client.read_feature_values(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)


@pytest.mark.asyncio
async def test_read_feature_values_async_from_dict():
    """Async variant also accepts a dict request."""
    await test_read_feature_values_async(request_type=dict)


def test_read_feature_values_field_headers():
    """The `entity_type` routing field header is sent with the request."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.ReadFeatureValuesRequest()

    request.entity_type = 'entity_type_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
        client.read_feature_values(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'entity_type=entity_type_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_read_feature_values_field_headers_async():
    """The async client also sends the `entity_type` routing field header."""
    client = FeaturestoreOnlineServingServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = featurestore_online_service.ReadFeatureValuesRequest()

    request.entity_type = 'entity_type_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
        await client.read_feature_values(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'entity_type=entity_type_value',
    ) in kw['metadata']


def test_read_feature_values_flattened():
    """A flattened `entity_type` argument populates the request message."""
    client = FeaturestoreOnlineServingServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_feature_values),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.read_feature_values(
            entity_type='entity_type_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_online_service.StreamingReadFeatureValuesRequest, + dict, +]) +def test_streaming_read_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + response = client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_streaming_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + client.streaming_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + response = await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. 
+ message = await response.read() + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async_from_dict(): + await test_streaming_read_feature_values_async(request_type=dict) + + +def test_streaming_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_streaming_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.streaming_read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_streaming_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.streaming_read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreOnlineServingServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ) + +def test_featurestore_online_serving_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_featurestore_online_serving_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'read_feature_values', + 'streaming_read_feature_values', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_featurestore_online_serving_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_featurestore_online_serving_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport() + adc.assert_called_once() + + +def test_featurestore_online_serving_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreOnlineServingServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), + (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_online_serving_service_host_no_port(transport_name): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_online_serving_service_host_with_port(transport_name): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + 
+def test_featurestore_online_serving_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) + + # Check that the path 
construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+# Renamed from test_cancel_operation so it no longer shadows the sync test above;
+# the async client requires the "grpc_asyncio" transport.
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_cancel_operation_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.CancelOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        call.return_value = None
+
+        client.cancel_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.CancelOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        await client.cancel_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+# Renamed from test_wait_operation so it no longer shadows the sync test above;
+# the async client requires the "grpc_asyncio" transport.
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+# Renamed from test_get_operation so it no longer shadows the sync test above;
+# the async client requires the "grpc_asyncio" transport.
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+# Renamed from test_list_operations so it no longer shadows the sync test above;
+# the async client requires the "grpc_asyncio" transport.
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        call.return_value = operations_pb2.ListOperationsResponse()
+
+        client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+# Renamed from test_list_locations so it no longer shadows the sync test above;
+# the async client requires the "grpc_asyncio" transport.
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Patch get_location (was list_locations): the stub being mocked must be
+    # the one the client method under test actually invokes, otherwise the
+    # call below would hit the real stub and call.assert_called() would fail.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Patch get_location (was list_locations), matching the method invoked below.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+    # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py new file mode 100644 index 0000000000..b2e145256d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -0,0 +1,8173 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.featurestore_service import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1.services.featurestore_service import FeaturestoreServiceClient +from google.cloud.aiplatform_v1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1.services.featurestore_service import transports +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import entity_type +from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1.types import feature +from google.cloud.aiplatform_v1.types import feature as gca_feature +from 
google.cloud.aiplatform_v1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1.types import feature_selector +from google.cloud.aiplatform_v1.types import featurestore +from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1.types import featurestore_monitoring +from google.cloud.aiplatform_v1.types import featurestore_service +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None + assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), +]) +def test_featurestore_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FeaturestoreServiceGrpcTransport, "grpc"), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def 
test_featurestore_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (FeaturestoreServiceClient, "grpc"), + (FeaturestoreServiceAsyncClient, "grpc_asyncio"), +]) +def test_featurestore_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_featurestore_service_client_get_transport_class(): + transport = FeaturestoreServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) +@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) +def test_featurestore_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) +@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreServiceClient, FeaturestoreServiceAsyncClient +]) +@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) +@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) +def test_featurestore_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", grpc_helpers), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_featurestore_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", grpc_helpers), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_featurestore_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.CreateFeaturestoreRequest, + dict, +]) +def test_create_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + client.create_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_featurestore_async_from_dict(): + await test_create_featurestore_async(request_type=dict) + + +def test_create_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_featurestore( + parent='parent_value', + featurestore=gca_featurestore.Featurestore(name='name_value'), + featurestore_id='featurestore_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].featurestore_id + mock_val = 'featurestore_id_value' + assert arg == mock_val + + +def test_create_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent='parent_value', + featurestore=gca_featurestore.Featurestore(name='name_value'), + featurestore_id='featurestore_id_value', + ) + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_featurestore( + parent='parent_value', + featurestore=gca_featurestore.Featurestore(name='name_value'), + featurestore_id='featurestore_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].featurestore_id + mock_val = 'featurestore_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_featurestore( + featurestore_service.CreateFeaturestoreRequest(), + parent='parent_value', + featurestore=gca_featurestore.Featurestore(name='name_value'), + featurestore_id='featurestore_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.GetFeaturestoreRequest, + dict, +]) +def test_get_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore( + name='name_value', + etag='etag_value', + state=featurestore.Featurestore.State.STABLE, + ) + response = client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore.Featurestore) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.state == featurestore.Featurestore.State.STABLE + + +def test_get_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + client.get_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore( + name='name_value', + etag='etag_value', + state=featurestore.Featurestore.State.STABLE, + )) + response = await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore.Featurestore) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.state == featurestore.Featurestore.State.STABLE + + +@pytest.mark.asyncio +async def test_get_featurestore_async_from_dict(): + await test_get_featurestore_async(request_type=dict) + + +def test_get_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + call.return_value = featurestore.Featurestore() + client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) + await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_featurestore( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_featurestore( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListFeaturestoresRequest, + dict, +]) +def test_list_featurestores(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = featurestore_service.ListFeaturestoresResponse( + next_page_token='next_page_token_value', + ) + response = client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_featurestores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + client.list_featurestores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + +@pytest.mark.asyncio +async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_featurestores_async_from_dict(): + await test_list_featurestores_async(request_type=dict) + + +def test_list_featurestores_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturestoresResponse() + client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_featurestores_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) + await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_featurestores_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_featurestores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_featurestores_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_featurestores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), + parent='parent_value', + ) + + +def test_list_featurestores_pager(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_featurestores(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, featurestore.Featurestore) + for i in results) +def test_list_featurestores_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_featurestores(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_featurestores_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_featurestores(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, featurestore.Featurestore) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_featurestores(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateFeaturestoreRequest, + dict, +]) +def test_update_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_update_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + client.update_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_featurestore_async_from_dict(): + await test_update_featurestore_async(request_type=dict) + + +def test_update_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=name_value', + ) in kw['metadata'] + + +def test_update_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteFeaturestoreRequest, + dict, +]) +def test_delete_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + client.delete_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_featurestore_async_from_dict(): + await test_delete_featurestore_async(request_type=dict) + + +def test_delete_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_featurestore( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + + +def test_delete_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name='name_value', + force=True, + ) + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_featurestore( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name='name_value', + force=True, + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.CreateEntityTypeRequest, + dict, +]) +def test_create_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + client.create_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + +@pytest.mark.asyncio +async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_entity_type_async_from_dict(): + await test_create_entity_type_async(request_type=dict) + + +def test_create_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateEntityTypeRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateEntityTypeRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_entity_type( + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].entity_type_id + mock_val = 'entity_type_id_value' + assert arg == mock_val + + +def test_create_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_entity_type( + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].entity_type_id + mock_val = 'entity_type_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.GetEntityTypeRequest, + dict, +]) +def test_get_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + client.get_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + +@pytest.mark.asyncio +async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType(
+ name='name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.get_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.GetEntityTypeRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, entity_type.EntityType)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_entity_type_async_from_dict():
+ await test_get_entity_type_async(request_type=dict)
+
+
+def test_get_entity_type_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.GetEntityTypeRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ call.return_value = entity_type.EntityType()
+ client.get_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) + await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_entity_type( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_entity_type( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListEntityTypesRequest, + dict, +]) +def test_list_entity_types(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_entity_types_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ client.list_entity_types()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListEntityTypesRequest()
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_entity_types(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListEntityTypesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListEntityTypesAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_from_dict():
+ await test_list_entity_types_async(request_type=dict)
+
+
+def test_list_entity_types_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.ListEntityTypesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + call.return_value = featurestore_service.ListEntityTypesResponse() + client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_entity_types_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) + await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_entity_types_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_entity_types( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_entity_types_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_entity_types( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_entity_types_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_entity_types(
+ featurestore_service.ListEntityTypesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_entity_types_pager(transport_name: str = "grpc"):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_entity_types(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, entity_type.EntityType)
+ for i in results)
+def test_list_entity_types_pages(transport_name: str = "grpc"):
+ client = FeaturestoreServiceClient(
+
credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_entity_types(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pager():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_entity_types(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, entity_type.EntityType)
+ for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pages():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token='abc', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], + next_page_token='def', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + ], + next_page_token='ghi', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_entity_types(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateEntityTypeRequest, + dict, +]) +def test_update_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_entity_type.EntityType)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+def test_update_entity_type_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ client.update_entity_type()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateEntityTypeRequest()
+
+@pytest.mark.asyncio
+async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType(
+ name='name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.update_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateEntityTypeRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_entity_type_async_from_dict(): + await test_update_entity_type_async(request_type=dict) + + +def test_update_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + call.return_value = gca_entity_type.EntityType() + client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) + await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type.name=name_value', + ) in kw['metadata'] + + +def test_update_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_entity_type( + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_entity_type( + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteEntityTypeRequest, + dict, +]) +def test_delete_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + client.delete_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + +@pytest.mark.asyncio +async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_entity_type_async_from_dict(): + await test_delete_entity_type_async(request_type=dict) + + +def test_delete_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_entity_type( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + + +def test_delete_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name='name_value', + force=True, + ) + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_entity_type( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name='name_value', + force=True, + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.CreateFeatureRequest, + dict, +]) +def test_create_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + client.create_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + +@pytest.mark.asyncio +async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_feature_async_from_dict(): + await test_create_feature_async(request_type=dict) + + +def test_create_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_feature( + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].feature_id + mock_val = 'feature_id_value' + assert arg == mock_val + + +def test_create_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + +@pytest.mark.asyncio +async def test_create_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_feature( + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].feature_id + mock_val = 'feature_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.BatchCreateFeaturesRequest, + dict, +]) +def test_batch_create_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_create_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + client.batch_create_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + +@pytest.mark.asyncio +async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_create_features_async_from_dict(): + await test_batch_create_features_async(request_type=dict) + + +def test_batch_create_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_features( + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_features( + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.GetFeatureRequest, + dict, +]) +def test_get_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature( + name='name_value', + description='description_value', + value_type=feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + ) + response = client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +def test_get_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + client.get_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + +@pytest.mark.asyncio +async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature( + name='name_value', + description='description_value', + value_type=feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + )) + response = await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +@pytest.mark.asyncio +async def test_get_feature_async_from_dict(): + await test_get_feature_async(request_type=dict) + + +def test_get_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + call.return_value = feature.Feature() + client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_feature( + featurestore_service.GetFeatureRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_feature( + featurestore_service.GetFeatureRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListFeaturesRequest, + dict, +]) +def test_list_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + client.list_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + +@pytest.mark.asyncio +async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_features_async_from_dict(): + await test_list_features_async(request_type=dict) + + +def test_list_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturesResponse() + client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) + await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_features( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_features( + featurestore_service.ListFeaturesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_features( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_features( + featurestore_service.ListFeaturesRequest(), + parent='parent_value', + ) + + +def test_list_features_pager(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_features(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) + for i in results) +def test_list_features_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = list(client.list_features(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_features(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_features(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateFeatureRequest, + dict, +]) +def test_update_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature( + name='name_value', + description='description_value', + value_type=gca_feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + ) + response = client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == gca_feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +def test_update_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + client.update_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + +@pytest.mark.asyncio +async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature( + name='name_value', + description='description_value', + value_type=gca_feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + )) + response = await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == gca_feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +@pytest.mark.asyncio +async def test_update_feature_async_from_dict(): + await test_update_feature_async(request_type=dict) + + +def test_update_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = gca_feature.Feature() + client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=name_value', + ) in kw['metadata'] + + +def test_update_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_feature( + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_feature( + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteFeatureRequest, + dict, +]) +def test_delete_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + client.delete_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + +@pytest.mark.asyncio +async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_feature_async_from_dict(): + await test_delete_feature_async(request_type=dict) + + +def test_delete_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_feature( + featurestore_service.DeleteFeatureRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_feature( + featurestore_service.DeleteFeatureRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ImportFeatureValuesRequest, + dict, +]) +def test_import_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + client.import_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_feature_values_async_from_dict(): + await test_import_feature_values_async(request_type=dict) + + +def test_import_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_import_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_import_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.BatchReadFeatureValuesRequest, + dict, +]) +def test_batch_read_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + client.batch_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async_from_dict(): + await test_batch_read_feature_values_async(request_type=dict) + + +def test_batch_read_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = 'featurestore_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore=featurestore_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = 'featurestore_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore=featurestore_value', + ) in kw['metadata'] + + +def test_batch_read_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_feature_values( + featurestore='featurestore_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = 'featurestore_value' + assert arg == mock_val + + +def test_batch_read_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore='featurestore_value', + ) + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_feature_values( + featurestore='featurestore_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = 'featurestore_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore='featurestore_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ExportFeatureValuesRequest, + dict, +]) +def test_export_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + client.export_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_feature_values_async_from_dict(): + await test_export_feature_values_async(request_type=dict) + + +def test_export_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_export_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_export_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.SearchFeaturesRequest, + dict, +]) +def test_search_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchFeaturesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + client.search_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + +@pytest.mark.asyncio +async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse( + next_page_token='next_page_token_value', + )) + response = await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchFeaturesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_search_features_async_from_dict(): + await test_search_features_async(request_type=dict) + + +def test_search_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = 'location_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + call.return_value = featurestore_service.SearchFeaturesResponse() + client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'location=location_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = 'location_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) + await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'location=location_value', + ) in kw['metadata'] + + +def test_search_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_features( + location='location_value', + query='query_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].location + mock_val = 'location_value' + assert arg == mock_val + arg = args[0].query + mock_val = 'query_value' + assert arg == mock_val + + +def test_search_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.search_features( + featurestore_service.SearchFeaturesRequest(), + location='location_value', + query='query_value', + ) + +@pytest.mark.asyncio +async def test_search_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_features( + location='location_value', + query='query_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].location + mock_val = 'location_value' + assert arg == mock_val + arg = args[0].query + mock_val = 'query_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_search_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.search_features( + featurestore_service.SearchFeaturesRequest(), + location='location_value', + query='query_value', + ) + + +def test_search_features_pager(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('location', ''), + )), + ) + pager = client.search_features(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) + for i in results) +def test_search_features_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = list(client.search_features(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_search_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_features(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) + for i in responses) + + +@pytest.mark.asyncio +async def test_search_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_features(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = FeaturestoreServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.FeaturestoreServiceGrpcTransport, + ) + +def test_featurestore_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_featurestore_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_featurestore', + 'get_featurestore', + 'list_featurestores', + 'update_featurestore', + 'delete_featurestore', + 'create_entity_type', + 'get_entity_type', + 'list_entity_types', + 'update_entity_type', + 'delete_entity_type', + 'create_feature', + 'batch_create_features', + 'get_feature', + 'list_features', + 'update_feature', + 'delete_feature', + 'import_feature_values', + 'batch_read_feature_values', + 'export_feature_values', + 'search_features', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_featurestore_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + 
quota_project_id="octopus", + ) + + +def test_featurestore_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport() + adc.assert_called_once() + + +def test_featurestore_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_service_host_no_port(transport_name): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_service_host_with_port(transport_name): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_featurestore_service_grpc_transport_channel(): + 
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + 
grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_featurestore_service_grpc_lro_client(): + client = FeaturestoreServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_featurestore_service_grpc_lro_async_client(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_entity_type_path(path) + assert expected == actual + +def test_feature_path(): + project = "winkle" + location = "nautilus" + featurestore = "scallop" + entity_type = "abalone" + feature = "squid" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) + actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature) + assert expected == actual + + +def test_parse_feature_path(): + expected = { + "project": "clam", + "location": "whelk", + "featurestore": "octopus", + "entity_type": "oyster", + "feature": "nudibranch", + } + path = FeaturestoreServiceClient.feature_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_feature_path(path) + assert expected == actual + +def test_featurestore_path(): + project = "cuttlefish" + location = "mussel" + featurestore = "winkle" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) + actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore) + assert expected == actual + + +def test_parse_featurestore_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "featurestore": "abalone", + } + path = FeaturestoreServiceClient.featurestore_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_featurestore_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FeaturestoreServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FeaturestoreServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FeaturestoreServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FeaturestoreServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FeaturestoreServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py new file mode 100644 index 0000000000..bb17f49580 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -0,0 +1,4694 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.index_endpoint_service import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.index_endpoint_service import IndexEndpointServiceClient +from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1.services.index_endpoint_service import transports +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1.types import index_endpoint_service +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from 
google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + 
client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.IndexEndpointServiceGrpcTransport, "grpc"), + (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) 
+ + +def test_index_endpoint_service_client_get_transport_class(): + transport = IndexEndpointServiceClient.get_transport_class() + available_transports = [ + transports.IndexEndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexEndpointServiceClient.get_transport_class("grpc") + assert transport == transports.IndexEndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint 
autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + IndexEndpointServiceClient, IndexEndpointServiceAsyncClient +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +def test_index_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", grpc_helpers), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_index_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = IndexEndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", grpc_helpers), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_endpoint_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.CreateIndexEndpointRequest, + dict, +]) +def test_create_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + client.create_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async_from_dict(): + await test_create_index_endpoint_async(request_type=dict) + + +def test_create_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index_endpoint( + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + + +def test_create_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index_endpoint( + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.GetIndexEndpointRequest, + dict, +]) +def test_get_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + ) + response = client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +def test_get_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + client.get_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + )) + response = await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +@pytest.mark.asyncio +async def test_get_index_endpoint_async_from_dict(): + await test_get_index_endpoint_async(request_type=dict) + + +def test_get_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + call.return_value = index_endpoint.IndexEndpoint() + client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) + await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.ListIndexEndpointsRequest, + dict, +]) +def test_list_index_endpoints(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_index_endpoints_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + client.list_index_endpoints() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + +@pytest.mark.asyncio +async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_from_dict(): + await test_list_index_endpoints_async(request_type=dict) + + +def test_list_index_endpoints_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_index_endpoints_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) + await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_index_endpoints_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_index_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_index_endpoints_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_index_endpoints( + index_endpoint_service.ListIndexEndpointsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_index_endpoints_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_index_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_index_endpoints_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_index_endpoints( + index_endpoint_service.ListIndexEndpointsRequest(), + parent='parent_value', + ) + + +def test_list_index_endpoints_pager(transport_name: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_index_endpoints(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) + for i in results) +def 
test_list_index_endpoints_pages(transport_name: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = list(client.list_index_endpoints(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pager(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_index_endpoints(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pages(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_index_endpoints(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.UpdateIndexEndpointRequest, + dict, +]) +def test_update_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + ) + response = client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +def test_update_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + client.update_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + )) + response = await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +@pytest.mark.asyncio +async def test_update_index_endpoint_async_from_dict(): + await test_update_index_endpoint_async(request_type=dict) + + +def test_update_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + + request.index_endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + call.return_value = gca_index_endpoint.IndexEndpoint() + client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + + request.index_endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) + await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint.name=name_value', + ) in kw['metadata'] + + +def test_update_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.DeleteIndexEndpointRequest, + dict, +]) +def test_delete_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + client.delete_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async_from_dict(): + await test_delete_index_endpoint_async(request_type=dict) + + +def test_delete_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.DeployIndexRequest, + dict, +]) +def test_deploy_index(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + client.deploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + +@pytest.mark.asyncio +async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_index_async_from_dict(): + await test_deploy_index_async(request_type=dict) + + +def test_deploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +def test_deploy_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + + +def test_deploy_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + +@pytest.mark.asyncio +async def test_deploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.UndeployIndexRequest, + dict, +]) +def test_undeploy_index(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + client.undeploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + +@pytest.mark.asyncio +async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_index_async_from_dict(): + await test_undeploy_index_async(request_type=dict) + + +def test_undeploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +def test_undeploy_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_index( + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index_id + mock_val = 'deployed_index_id_value' + assert arg == mock_val + + +def test_undeploy_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.undeploy_index( + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index_id + mock_val = 'deployed_index_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.MutateDeployedIndexRequest, + dict, +]) +def test_mutate_deployed_index(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_mutate_deployed_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + client.mutate_deployed_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.MutateDeployedIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async_from_dict(): + await test_mutate_deployed_index_async(request_type=dict) + + +def test_mutate_deployed_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +def test_mutate_deployed_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_deployed_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + + +def test_mutate_deployed_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_deployed_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexEndpointServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = IndexEndpointServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexEndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = IndexEndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.IndexEndpointServiceGrpcTransport, + ) + +def test_index_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.IndexEndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_index_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.IndexEndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_index_endpoint', + 'get_index_endpoint', + 'list_index_endpoints', + 'update_index_endpoint', + 'delete_index_endpoint', + 'deploy_index', + 'undeploy_index', + 'mutate_deployed_index', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_index_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_index_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport() + adc.assert_called_once() + + +def test_index_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexEndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + ], +) +def test_index_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.IndexEndpointServiceGrpcTransport, grpc_helpers), + (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) +def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_endpoint_service_host_no_port(transport_name):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:443'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_endpoint_service_host_with_port(transport_name):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:8000'
+    )
+
+def test_index_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    # PEP 8 (E711): identity check against the None singleton, not equality.
+    assert transport._ssl_channel_credentials is None
+
+
+def test_index_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    # PEP 8 (E711): identity check against the None singleton, not equality.
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) +def test_index_endpoint_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_index_endpoint_service_grpc_lro_client(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_index_endpoint_service_grpc_lro_async_client(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_path(): + project = "squid" + location = "clam" + index = "whelk" + expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + actual = IndexEndpointServiceClient.index_path(project, location, index) + assert expected == actual + + +def test_parse_index_path(): + expected = { + "project": "octopus", + "location": "oyster", + "index": "nudibranch", + } + path = IndexEndpointServiceClient.index_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_index_path(path) + assert expected == actual + +def test_index_endpoint_path(): + project = "cuttlefish" + location = "mussel" + index_endpoint = "winkle" + expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint) + assert expected == actual + + +def test_parse_index_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "index_endpoint": "abalone", + } + path = IndexEndpointServiceClient.index_endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexEndpointServiceClient.parse_index_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = IndexEndpointServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexEndpointServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = IndexEndpointServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexEndpointServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = IndexEndpointServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexEndpointServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexEndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = IndexEndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexEndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = IndexEndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexEndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexEndpointServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = IndexEndpointServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+# Renamed from `test_delete_operation`: the async variant must not shadow the
+# sync test of the same name (F811) — otherwise pytest silently drops the sync
+# test. The async client only supports the "grpc_asyncio" transport.
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+# Renamed from `test_cancel_operation`: the async variant must not shadow the
+# sync test of the same name (F811) — otherwise pytest silently drops the sync
+# test. The async client only supports the "grpc_asyncio" transport.
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+# Renamed from `test_wait_operation`: the async variant must not shadow the
+# sync test of the same name (F811) — otherwise pytest silently drops the sync
+# test. The async client only supports the "grpc_asyncio" transport.
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+# FIX: was `test_get_operation`, shadowing the sync test of the same name so the
+# sync test never ran; also switch the default transport to the async client's
+# only registered key, "grpc_asyncio".
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+# FIX: was `test_list_operations`, shadowing the sync test of the same name so
+# the sync test never ran; also switch the default transport to the async
+# client's only registered key, "grpc_asyncio".
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+# FIX: was `test_list_locations`, shadowing the sync test of the same name so
+# the sync test never ran; also switch the default transport to the async
+# client's only registered key, "grpc_asyncio".
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # FIX: was patching `client.transport.list_locations` while calling
+    # `client.get_location`, so the real get_location stub was invoked and
+    # `call.assert_called()` could never pass.  Patch the method under test.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # FIX: patch `get_location` (the method under test), not `list_locations`.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py new file mode 100644 index 0000000000..5ab14ae343 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py @@ -0,0 +1,3945 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.index_service import IndexServiceAsyncClient +from google.cloud.aiplatform_v1.services.index_service import IndexServiceClient +from google.cloud.aiplatform_v1.services.index_service import pagers +from google.cloud.aiplatform_v1.services.index_service import transports +from google.cloud.aiplatform_v1.types import deployed_index_ref +from google.cloud.aiplatform_v1.types import index +from google.cloud.aiplatform_v1.types import index as gca_index +from google.cloud.aiplatform_v1.types import index_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from 
google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexServiceClient._get_default_mtls_endpoint(None) is None + assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, 
client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.IndexServiceGrpcTransport, "grpc"), + (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_index_service_client_get_transport_class(): + transport = IndexServiceClient.get_transport_class() + available_transports = [ + transports.IndexServiceGrpcTransport, + ] + assert transport in 
available_transports + + transport = IndexServiceClient.get_transport_class("grpc") + assert transport == transports.IndexServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +def test_index_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + IndexServiceClient, IndexServiceAsyncClient +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +def test_index_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", grpc_helpers), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_index_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = IndexServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", grpc_helpers), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.CreateIndexRequest, + dict, +]) +def test_create_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + client.create_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + +@pytest.mark.asyncio +async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_async_from_dict(): + await test_create_index_async(request_type=dict) + + +def test_create_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index( + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + + +def test_create_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_index( + index_service.CreateIndexRequest(), + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index( + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_index( + index_service.CreateIndexRequest(), + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.GetIndexRequest, + dict, +]) +def test_get_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.GetIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index.Index) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + client.get_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.GetIndexRequest() + +@pytest.mark.asyncio +async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index.Index( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.GetIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, index.Index) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_index_async_from_dict(): + await test_get_index_async(request_type=dict) + + +def test_get_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + call.return_value = index.Index() + client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + await client.get_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_index( + index_service.GetIndexRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_index( + index_service.GetIndexRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.ListIndexesRequest, + dict, +]) +def test_list_indexes(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_indexes_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + client.list_indexes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + +@pytest.mark.asyncio +async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListIndexesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_indexes_async_from_dict(): + await test_list_indexes_async(request_type=dict) + + +def test_list_indexes_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.ListIndexesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + call.return_value = index_service.ListIndexesResponse() + client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_indexes_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.ListIndexesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) + await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_indexes_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_indexes( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_indexes_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_indexes( + index_service.ListIndexesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_indexes_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = index_service.ListIndexesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_indexes( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_indexes_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_indexes( + index_service.ListIndexesRequest(), + parent='parent_value', + ) + + +def test_list_indexes_pager(transport_name: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_indexes(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, index.Index) + for i in results) +def test_list_indexes_pages(transport_name: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + pages = list(client.list_indexes(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_indexes_async_pager(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_indexes), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_indexes(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index.Index) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_indexes_async_pages(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_indexes(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + index_service.UpdateIndexRequest, + dict, +]) +def test_update_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + client.update_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + +@pytest.mark.asyncio +async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_index_async_from_dict(): + await test_update_index_async(request_type=dict) + + +def test_update_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_service.UpdateIndexRequest() + + request.index.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.UpdateIndexRequest() + + request.index.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index.name=name_value', + ) in kw['metadata'] + + +def test_update_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_index( + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_index( + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.DeleteIndexRequest, + dict, +]) +def test_delete_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + client.delete_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + +@pytest.mark.asyncio +async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_async_from_dict(): + await test_delete_index_async(request_type=dict) + + +def test_delete_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index( + index_service.DeleteIndexRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_index( + index_service.DeleteIndexRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = IndexServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = IndexServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.IndexServiceGrpcTransport, + ) + +def test_index_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_index_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_index', + 'get_index', + 'list_indexes', + 'update_index', + 'delete_index', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_index_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_index_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport() + adc.assert_called_once() + + +def test_index_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, + ], +) +def test_index_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.IndexServiceGrpcTransport, grpc_helpers), + (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_index_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_index_service_host_no_port(transport_name): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_index_service_host_with_port(transport_name): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_index_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.IndexServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_index_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.IndexServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_index_service_grpc_lro_client(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_service_grpc_lro_async_client(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_path(): + project = "squid" + location = "clam" + index = "whelk" + expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + actual = IndexServiceClient.index_path(project, location, index) + assert expected == actual + + +def test_parse_index_path(): + expected = { + "project": "octopus", + "location": "oyster", + "index": "nudibranch", + } + path = IndexServiceClient.index_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_index_path(path) + assert expected == actual + +def test_index_endpoint_path(): + project = "cuttlefish" + location = "mussel" + index_endpoint = "winkle" + expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) + assert expected == actual + + +def test_parse_index_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "index_endpoint": "abalone", + } + path = IndexServiceClient.index_endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_index_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = IndexServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = IndexServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = IndexServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = IndexServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = IndexServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = IndexServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py new file mode 100644 index 0000000000..3dfb3e36f7 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -0,0 +1,10658 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient +from google.cloud.aiplatform_v1.services.job_service import JobServiceClient +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.services.job_service import transports +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from 
google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import explanation_metadata +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1.types import model_monitoring +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobServiceClient._get_default_mtls_endpoint(None) is None + assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), +]) +def test_job_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobServiceGrpcTransport, "grpc"), + (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def 
test_job_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), +]) +def test_job_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_job_service_client_get_transport_class(): + transport = JobServiceClient.get_transport_class() + available_transports = [ + transports.JobServiceGrpcTransport, + ] + assert transport in available_transports + + transport = JobServiceClient.get_transport_class("grpc") + assert transport == transports.JobServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + 
(JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, JobServiceAsyncClient +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_job_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_job_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateCustomJobRequest, + dict, +]) +def test_create_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + client.create_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + +@pytest.mark.asyncio +async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_custom_job_async_from_dict(): + await test_create_custom_job_async(request_type=dict) + + +def test_create_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = gca_custom_job.CustomJob() + client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].custom_job + mock_val = gca_custom_job.CustomJob(name='name_value') + assert arg == mock_val + + +def test_create_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].custom_job + mock_val = gca_custom_job.CustomJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetCustomJobRequest, + dict, +]) +def test_get_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + client.get_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + +@pytest.mark.asyncio +async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_custom_job_async_from_dict(): + await test_get_custom_job_async(request_type=dict) + + +def test_get_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = custom_job.CustomJob() + client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListCustomJobsRequest, + dict, +]) +def test_list_custom_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCustomJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_custom_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + client.list_custom_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + +@pytest.mark.asyncio +async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListCustomJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_custom_jobs_async_from_dict(): + await test_list_custom_jobs_async(request_type=dict) + + +def test_list_custom_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = job_service.ListCustomJobsResponse() + client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_custom_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) + await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_custom_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_custom_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_custom_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_custom_jobs( + job_service.ListCustomJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_custom_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = job_service.ListCustomJobsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_custom_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_custom_jobs(
+            job_service.ListCustomJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_custom_jobs_pager(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_custom_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, custom_job.CustomJob)
+                   for i in results)
+def test_list_custom_jobs_pages(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_custom_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_custom_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, custom_job.CustomJob)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + next_page_token='abc', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[], + next_page_token='def', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + ], + next_page_token='ghi', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_custom_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteCustomJobRequest, + dict, +]) +def test_delete_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + client.delete_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + +@pytest.mark.asyncio +async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_custom_job_async_from_dict(): + await test_delete_custom_job_async(request_type=dict) + + +def test_delete_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.DeleteCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_custom_job( + job_service.DeleteCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_custom_job( + job_service.DeleteCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelCustomJobRequest, + dict, +]) +def test_cancel_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + client.cancel_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + +@pytest.mark.asyncio +async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async_from_dict(): + await test_cancel_custom_job_async(request_type=dict) + + +def test_cancel_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = None + client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.cancel_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_custom_job( + job_service.CancelCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.cancel_custom_job( + job_service.CancelCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateDataLabelingJobRequest, + dict, +]) +def test_create_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + ) + response = client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +def test_create_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + client.create_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + )) + response = await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async_from_dict(): + await test_create_data_labeling_job_async(request_type=dict) + + +def test_create_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = gca_data_labeling_job.DataLabelingJob() + client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].data_labeling_job + mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') + assert arg == mock_val + + +def test_create_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_data_labeling_job( + job_service.CreateDataLabelingJobRequest(), + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].data_labeling_job + mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_data_labeling_job( + job_service.CreateDataLabelingJobRequest(), + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetDataLabelingJobRequest, + dict, +]) +def test_get_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + ) + response = client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +def test_get_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + client.get_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + )) + response = await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async_from_dict(): + await test_get_data_labeling_job_async(request_type=dict) + + +def test_get_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = data_labeling_job.DataLabelingJob() + client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.GetDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_data_labeling_job( + job_service.GetDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_data_labeling_job( + job_service.GetDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListDataLabelingJobsRequest, + dict, +]) +def test_list_data_labeling_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataLabelingJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_data_labeling_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + client.list_data_labeling_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_from_dict(): + await test_list_data_labeling_jobs_async(request_type=dict) + + +def test_list_data_labeling_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListDataLabelingJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + call.return_value = job_service.ListDataLabelingJobsResponse() + client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListDataLabelingJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) + await client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_data_labeling_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_data_labeling_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_data_labeling_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_data_labeling_jobs( + job_service.ListDataLabelingJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_data_labeling_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_data_labeling_jobs( + job_service.ListDataLabelingJobsRequest(), + parent='parent_value', + ) + + +def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_data_labeling_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, data_labeling_job.DataLabelingJob) + for i in results) +def test_list_data_labeling_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_data_labeling_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_data_labeling_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, data_labeling_job.DataLabelingJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_data_labeling_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteDataLabelingJobRequest, + dict, +]) +def test_delete_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + client.delete_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async_from_dict(): + await test_delete_data_labeling_job_async(request_type=dict) + + +def test_delete_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_data_labeling_job( + job_service.DeleteDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_data_labeling_job( + job_service.DeleteDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelDataLabelingJobRequest, + dict, +]) +def test_cancel_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + client.cancel_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async_from_dict(): + await test_cancel_data_labeling_job_async(request_type=dict) + + +def test_cancel_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = None + client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.cancel_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateHyperparameterTuningJobRequest, + dict, +]) +def test_create_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + client.create_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async_from_dict(): + await test_create_hyperparameter_tuning_job_async(request_type=dict) + + +def test_create_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateHyperparameterTuningJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateHyperparameterTuningJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_hyperparameter_tuning_job( + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].hyperparameter_tuning_job + mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') + assert arg == mock_val + + +def test_create_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_hyperparameter_tuning_job( + job_service.CreateHyperparameterTuningJobRequest(), + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_hyperparameter_tuning_job( + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].hyperparameter_tuning_job + mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_hyperparameter_tuning_job( + job_service.CreateHyperparameterTuningJobRequest(), + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetHyperparameterTuningJobRequest, + dict, +]) +def test_get_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + client.get_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async_from_dict(): + await test_get_hyperparameter_tuning_job_async(request_type=dict) + + +def test_get_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_hyperparameter_tuning_job( + job_service.GetHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_hyperparameter_tuning_job( + job_service.GetHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListHyperparameterTuningJobsRequest, + dict, +]) +def test_list_hyperparameter_tuning_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListHyperparameterTuningJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHyperparameterTuningJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_hyperparameter_tuning_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + client.list_hyperparameter_tuning_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_from_dict(): + await test_list_hyperparameter_tuning_jobs_async(request_type=dict) + + +def test_list_hyperparameter_tuning_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = job_service.ListHyperparameterTuningJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + call.return_value = job_service.ListHyperparameterTuningJobsResponse() + client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListHyperparameterTuningJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) + await client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_hyperparameter_tuning_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListHyperparameterTuningJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_hyperparameter_tuning_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_hyperparameter_tuning_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hyperparameter_tuning_jobs( + job_service.ListHyperparameterTuningJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_hyperparameter_tuning_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_hyperparameter_tuning_jobs( + job_service.ListHyperparameterTuningJobsRequest(), + parent='parent_value', + ) + + +def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_hyperparameter_tuning_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) + for i in results) +def test_list_hyperparameter_tuning_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_hyperparameter_tuning_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteHyperparameterTuningJobRequest, + dict, +]) +def test_delete_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + client.delete_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async_from_dict(): + await test_delete_hyperparameter_tuning_job_async(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelHyperparameterTuningJobRequest, + dict, +]) +def test_cancel_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + client.cancel_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async_from_dict(): + await test_cancel_hyperparameter_tuning_job_async(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = None + client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_hyperparameter_tuning_job( + job_service.CancelHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_hyperparameter_tuning_job( + job_service.CancelHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateBatchPredictionJobRequest, + dict, +]) +def test_create_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + client.create_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_async_from_dict(): + await test_create_batch_prediction_job_async(request_type=dict) + + +def test_create_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateBatchPredictionJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateBatchPredictionJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) + await client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_batch_prediction_job( + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].batch_prediction_job + mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') + assert arg == mock_val + + +def test_create_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_batch_prediction_job( + job_service.CreateBatchPredictionJobRequest(), + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_batch_prediction_job( + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].batch_prediction_job + mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_batch_prediction_job( + job_service.CreateBatchPredictionJobRequest(), + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetBatchPredictionJobRequest, + dict, +]) +def test_get_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + client.get_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_async_from_dict(): + await test_get_batch_prediction_job_async(request_type=dict) + + +def test_get_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + call.return_value = batch_prediction_job.BatchPredictionJob() + client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) + await client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_batch_prediction_job( + job_service.GetBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_batch_prediction_job( + job_service.GetBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListBatchPredictionJobsRequest, + dict, +]) +def test_list_batch_prediction_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListBatchPredictionJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBatchPredictionJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_batch_prediction_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + client.list_batch_prediction_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListBatchPredictionJobsRequest() + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListBatchPredictionJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_async_from_dict(): + await test_list_batch_prediction_jobs_async(request_type=dict) + + +def test_list_batch_prediction_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.ListBatchPredictionJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + call.return_value = job_service.ListBatchPredictionJobsResponse() + client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListBatchPredictionJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) + await client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_batch_prediction_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_batch_prediction_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_batch_prediction_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_batch_prediction_jobs( + job_service.ListBatchPredictionJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_batch_prediction_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_batch_prediction_jobs( + job_service.ListBatchPredictionJobsRequest(), + parent='parent_value', + ) + + +def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='abc', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[], + next_page_token='def', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='ghi', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_batch_prediction_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i,
batch_prediction_job.BatchPredictionJob) + for i in results) +def test_list_batch_prediction_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='abc', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[], + next_page_token='def', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='ghi', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_batch_prediction_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='abc', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[], + next_page_token='def', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='ghi', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_batch_prediction_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='abc', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[], + next_page_token='def', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='ghi', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteBatchPredictionJobRequest, + dict, +]) +def test_delete_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + client.delete_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async_from_dict(): + await test_delete_batch_prediction_job_async(request_type=dict) + + +def test_delete_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_batch_prediction_job( + job_service.DeleteBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_batch_prediction_job( + job_service.DeleteBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelBatchPredictionJobRequest, + dict, +]) +def test_cancel_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + client.cancel_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async_from_dict(): + await test_cancel_batch_prediction_job_async(request_type=dict) + + +def test_cancel_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + call.return_value = None + client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_batch_prediction_job( + job_service.CancelBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_batch_prediction_job( + job_service.CancelBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateModelDeploymentMonitoringJobRequest, + dict, +]) +def test_create_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + ) + response = client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +def test_create_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + client.create_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + )) + response = await client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async_from_dict(): + await test_create_model_deployment_monitoring_job_async(request_type=dict) + + +def test_create_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + await client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_model_deployment_monitoring_job( + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + + +def test_create_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_model_deployment_monitoring_job( + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + dict, +]) +def test_search_model_deployment_monitoring_stats_anomalies(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + client.search_model_deployment_monitoring_stats_anomalies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + next_page_token='next_page_token_value', + )) + response = await client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict(): + await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict) + + +def test_search_model_deployment_monitoring_stats_anomalies_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + request.model_deployment_monitoring_job = 'model_deployment_monitoring_job_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job=model_deployment_monitoring_job_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + request.model_deployment_monitoring_job = 'model_deployment_monitoring_job_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) + await client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job=model_deployment_monitoring_job_value', + ) in kw['metadata'] + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_model_deployment_monitoring_stats_anomalies( + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = 'model_deployment_monitoring_job_value' + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_model_deployment_monitoring_stats_anomalies( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + deployed_model_id='deployed_model_id_value', + ) + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+
+ # NOTE(review): the assignment above is dead code — it is immediately
+ # overwritten by the awaitable FakeUnaryUnaryCall wrapper below. Known
+ # GAPIC generator artifact; harmless but noisy. Fix belongs in the
+ # generator template, not this generated file.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.search_model_deployment_monitoring_stats_anomalies(
+ model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+ deployed_model_id='deployed_model_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].model_deployment_monitoring_job
+ mock_val = 'model_deployment_monitoring_job_value'
+ assert arg == mock_val
+ arg = args[0].deployed_model_id
+ mock_val = 'deployed_model_id_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
+ client = JobServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.search_model_deployment_monitoring_stats_anomalies(
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+ model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+ deployed_model_id='deployed_model_id_value',
+ )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_pager(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ # NOTE(review): `AnonymousCredentials` is passed as the class, not an
+ # instance (missing `()`), unlike every non-pager test in this file —
+ # presumably a generator slip; confirm the client accepts a class here.
+ credentials=ga_credentials.AnonymousCredentials,
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token='abc',
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[],
+ next_page_token='def',
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ next_page_token='ghi',
+ ),
+ job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+ monitoring_stats=[
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ # Expected routing metadata: the test sends `request={}`, so the routing
+ # header value for `model_deployment_monitoring_job` is the empty string.
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('model_deployment_monitoring_job', ''),
+ )),
+ )
+ pager = client.search_model_deployment_monitoring_stats_anomalies(request={})
+
+ assert pager._metadata == metadata
+
+ # 3 + 0 + 1 + 2 items across the four fake pages above.
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies)
+ for i in results)
+def test_search_model_deployment_monitoring_stats_anomalies_pages(transport_name: str = "grpc"):
+ client = JobServiceClient(
+ # NOTE(review): class passed instead of an instance (missing `()`) —
+ # same pattern as the pager test above; confirm intended.
+ credentials=ga_credentials.AnonymousCredentials,
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='abc', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], + next_page_token='def', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='ghi', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='abc', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], + next_page_token='def', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='ghi', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) + for i in responses) + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='abc', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], + next_page_token='def', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='ghi', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.GetModelDeploymentMonitoringJobRequest, + dict, +]) +def test_get_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + ) + response = client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +def test_get_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job),
+ '__call__') as call:
+ client.get_model_deployment_monitoring_job()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest):
+ client = JobServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_model_deployment_monitoring_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ # NOTE(review): missing space after `=` on the next line
+ # (`=grpc_helpers_async`) — cosmetic generator artifact; fix belongs
+ # in the generator template, not this generated file.
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+ name='name_value',
+ display_name='display_name_value',
+ endpoint='endpoint_value',
+ state=job_state.JobState.JOB_STATE_QUEUED,
+ schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+ predict_instance_schema_uri='predict_instance_schema_uri_value',
+ analysis_instance_schema_uri='analysis_instance_schema_uri_value',
+ enable_monitoring_pipeline_logs=True,
+ ))
+ response = await client.get_model_deployment_monitoring_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_async_from_dict(): + await test_get_model_deployment_monitoring_job_async(request_type=dict) + + +def test_get_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListModelDeploymentMonitoringJobsRequest, + dict, +]) +def test_list_model_deployment_monitoring_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_deployment_monitoring_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + client.list_model_deployment_monitoring_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_from_dict(): + await test_list_model_deployment_monitoring_jobs_async(request_type=dict) + + +def test_list_model_deployment_monitoring_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) + await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_deployment_monitoring_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_deployment_monitoring_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_deployment_monitoring_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_model_deployment_monitoring_jobs(
+            job_service.ListModelDeploymentMonitoringJobsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_deployment_monitoring_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_deployment_monitoring_jobs(
+            job_service.ListModelDeploymentMonitoringJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_deployment_monitoring_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+                   for i in results)
+def test_list_model_deployment_monitoring_jobs_pages(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_model_deployment_monitoring_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.UpdateModelDeploymentMonitoringJobRequest, + dict, +]) +def test_update_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + client.update_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async_from_dict(): + await test_update_model_deployment_monitoring_job_async(request_type=dict) + + +def test_update_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=name_value', + ) in kw['metadata'] + + +def test_update_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteModelDeploymentMonitoringJobRequest, + dict, +]) +def test_delete_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + client.delete_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async_from_dict(): + await test_delete_model_deployment_monitoring_job_async(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.PauseModelDeploymentMonitoringJobRequest, + dict, +]) +def test_pause_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_pause_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + client.pause_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async_from_dict(): + await test_pause_model_deployment_monitoring_job_async(request_type=dict) + + +def test_pause_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_pause_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.pause_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_pause_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.pause_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ResumeModelDeploymentMonitoringJobRequest, + dict, +]) +def test_resume_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_resume_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + client.resume_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async_from_dict(): + await test_resume_model_deployment_monitoring_job_async(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_resume_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_resume_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = JobServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = JobServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = JobServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobServiceGrpcTransport, + ) + +def test_job_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_custom_job', + 'get_custom_job', + 'list_custom_jobs', + 'delete_custom_job', + 'cancel_custom_job', + 'create_data_labeling_job', + 'get_data_labeling_job', + 'list_data_labeling_jobs', + 'delete_data_labeling_job', + 'cancel_data_labeling_job', + 'create_hyperparameter_tuning_job', + 'get_hyperparameter_tuning_job', + 'list_hyperparameter_tuning_jobs', + 'delete_hyperparameter_tuning_job', + 'cancel_hyperparameter_tuning_job', + 'create_batch_prediction_job', + 'get_batch_prediction_job', + 'list_batch_prediction_jobs', + 'delete_batch_prediction_job', + 'cancel_batch_prediction_job', + 'create_model_deployment_monitoring_job', + 'search_model_deployment_monitoring_stats_anomalies', + 'get_model_deployment_monitoring_job', + 'list_model_deployment_monitoring_jobs', + 'update_model_deployment_monitoring_job', + 'delete_model_deployment_monitoring_job', + 'pause_model_deployment_monitoring_job', + 'resume_model_deployment_monitoring_job', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_job_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, 
mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_job_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport() + adc.assert_called_once() + + +def test_job_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, + ], +) +def test_job_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.JobServiceGrpcTransport, grpc_helpers), + (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_job_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_job_service_host_no_port(transport_name): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_job_service_host_with_port(transport_name): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_job_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_job_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key 
bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_job_service_grpc_lro_client(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_service_grpc_lro_async_client(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_batch_prediction_job_path(): + project = "squid" + location = "clam" + batch_prediction_job = "whelk" + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) + assert expected == actual + + +def test_parse_batch_prediction_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "batch_prediction_job": "nudibranch", + } + path = JobServiceClient.batch_prediction_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_batch_prediction_job_path(path) + assert expected == actual + +def test_custom_job_path(): + project = "cuttlefish" + location = "mussel" + custom_job = "winkle" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = JobServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "custom_job": "abalone", + } + path = JobServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_data_labeling_job_path(): + project = "squid" + location = "clam" + data_labeling_job = "whelk" + expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) + assert expected == actual + + +def test_parse_data_labeling_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "data_labeling_job": "nudibranch", + } + path = JobServiceClient.data_labeling_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_data_labeling_job_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = JobServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", + } + path = JobServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = JobServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = JobServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_hyperparameter_tuning_job_path(): + project = "cuttlefish" + location = "mussel" + hyperparameter_tuning_job = "winkle" + expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) + assert expected == actual + + +def test_parse_hyperparameter_tuning_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "hyperparameter_tuning_job": "abalone", + } + path = JobServiceClient.hyperparameter_tuning_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = JobServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = JobServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_deployment_monitoring_job_path(): + project = "cuttlefish" + location = "mussel" + model_deployment_monitoring_job = "winkle" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model_deployment_monitoring_job": "abalone", + } + path = JobServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_network_path(path) + assert expected == actual + +def test_tensorboard_path(): + project = "oyster" + location = "nudibranch" + tensorboard = "cuttlefish" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + actual = JobServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + } + path = JobServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_tensorboard_path(path) + assert expected == actual + +def test_trial_path(): + project = "scallop" + location = "abalone" + study = "squid" + trial = "clam" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = JobServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "whelk", + "location": "octopus", + "study": "oyster", + "trial": "nudibranch", + } + path = JobServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = JobServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = JobServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = JobServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = JobServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = JobServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = JobServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = JobServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty 
request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_delete_operation — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = None
+
+        response = client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_cancel_operation — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_wait_operation — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_get_operation — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_list_operations — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from test_list_locations — the original name
+    # shadowed the sync test above (F811), so the sync test was never run.
+    # transport defaults to "grpc_asyncio", the async client's transport.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+    # Async twin of the test above: verifies the request's ``name`` is routed
+    # into the x-goog-request-params gRPC metadata header.
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_list_locations_from_dict():
+    # A plain-dict request must be coerced into the proto request type.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        response = client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+    # Smoke test: an empty GetLocationRequest round-trips through the mocked
+    # stub and produces a Location response of the expected type.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+        response = client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, locations_pb2.Location)
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, locations_pb2.Location)
+
+def test_get_location_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials())
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = locations_pb2.Location()
+
+        client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # FIX(review): this test previously patched ``list_locations`` while
+    # invoking ``get_location``, so the real stub was exercised and
+    # ``call.assert_called()`` could never pass. Patch the method under test.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # FIX(review): patch ``get_location`` (not ``list_locations``) — see the
+    # sync variant above.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        # Use a resource-shaped name ("locations/abc") to match the other
+        # get_location tests; the bare collection name "locations" was an
+        # inconsistency.
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, policy_pb2.Policy)
+
+        assert response.version == 774
+
+        assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        )
+        response = await client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, policy_pb2.Policy)
+
+        assert response.version == 774
+
+        assert response.etag == b"etag_blob"
+
+def test_set_iam_policy_field_headers():
+    # Verifies the request's ``resource`` field is routed into the
+    # x-goog-request-params gRPC metadata header.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        call.return_value = policy_pb2.Policy()
+
+        client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+        await client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+def test_set_iam_policy_from_dict():
+    # A plain-dict request must be coerced into the proto request type.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+
+        response = client.set_iam_policy(
+            request={
+                "resource": "resource_value",
+                "policy": policy_pb2.Policy(version=774),
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy()
+        )
+
+        response = await client.set_iam_policy(
+            request={
+                "resource": "resource_value",
+                "policy": policy_pb2.Policy(version=774),
+            }
+        )
+        call.assert_called()
+
+def test_get_iam_policy(transport: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+
+        response = client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, policy_pb2.Policy)
+
+        assert response.version == 774
+
+        assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.get_iam_policy), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        )
+
+        response = await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        # FIX(review): pin exactly one underlying call (``== 1``) to match the
+        # sync variant; a bare truthiness check only proves "at least once".
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, policy_pb2.Policy)
+
+        assert response.version == 774
+
+        assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        call.return_value = policy_pb2.Policy()
+
+        client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.get_iam_policy), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+        await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        # FIX(review): strengthened from a bare length check to ``== 1``.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+
+        response = client.get_iam_policy(
+            request={
+                "resource": "resource_value",
+                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+            }
+        )
+        call.assert_called()
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy()
+        )
+
+        response = await client.get_iam_policy(
+            request={
+                "resource": "resource_value",
+                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+            }
+        )
+        call.assert_called()
+
+def test_test_iam_permissions(transport: str = "grpc"):
+    # Smoke test: TestIamPermissions round-trips through the mocked stub and
+    # the permissions list from the fake response is surfaced to the caller.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.TestIamPermissionsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+            permissions=["permissions_value"],
+        )
+
+        response = client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+        assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.TestIamPermissionsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],)
+        )
+
+        response = await client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        # FIX(review): pin exactly one underlying call (``== 1``) to match the
+        # sync variant; a bare truthiness check only proves "at least once".
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+        assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.TestIamPermissionsRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+        client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.TestIamPermissionsRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy_pb2.TestIamPermissionsResponse()
+        )
+
+        await client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        # FIX(review): strengthened from a bare length check to ``== 1``.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+        response = client.test_iam_permissions(
+            request={
+                "resource": "resource_value",
+                "permissions": ["permissions_value"],
+            }
+        )
+        call.assert_called()
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy_pb2.TestIamPermissionsResponse()
+        )
+
+        response = await client.test_iam_permissions(
+            request={
+                "resource": "resource_value",
+                "permissions": ["permissions_value"],
+            }
+        )
+        call.assert_called()
+
+def test_transport_close():
+    # Using the client as a context manager must close the underlying
+    # transport channel exactly once, and only on exit.
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = JobServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    # The client context manager must delegate close() to its transport.
+    transports = [
+        'grpc',
+    ]
+    for transport in transports:
+        client = JobServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
+
+@pytest.mark.parametrize("client_class,transport_class", [
+    (JobServiceClient, transports.JobServiceGrpcTransport),
+    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport),
+])
+def test_api_key_credentials(client_class, transport_class):
+    # When ClientOptions.api_key is set, the client must obtain API-key
+    # credentials via google.auth and hand them to the transport constructor.
+    with mock.patch.object(
+        google.auth._default, "get_api_key_credentials", create=True
+    ) as get_api_key_credentials:
+        mock_cred = mock.Mock()
+        get_api_key_credentials.return_value = mock_cred
+        options = client_options.ClientOptions()
+        options.api_key = "api_key"
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class(client_options=options)
+            patched.assert_called_once_with(
+                credentials=mock_cred,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+
always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py new file mode 100644 index 0000000000..96a1b7aceb --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py @@ -0,0 +1,11200 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceAsyncClient +from 
google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceClient +from google.cloud.aiplatform_v1.services.metadata_service import pagers +from google.cloud.aiplatform_v1.services.metadata_service import transports +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import context as gca_context +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import event +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import lineage_subgraph +from google.cloud.aiplatform_v1.types import metadata_schema +from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1.types import metadata_service +from google.cloud.aiplatform_v1.types import metadata_store +from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    # googleapis.com hosts map to their .mtls. counterpart; already-mTLS and
+    # non-Google hosts pass through unchanged; None stays None.
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert MetadataServiceClient._get_default_mtls_endpoint(None) is None
+    assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class,transport_name", [
+    (MetadataServiceClient, "grpc"),
+    (MetadataServiceAsyncClient, "grpc_asyncio"),
+])
+def test_metadata_service_client_from_service_account_info(client_class, transport_name):
+    # from_service_account_info must build a client wired with the credentials
+    # produced by the (mocked) service_account factory.
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info, transport=transport_name)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == (
+            'aiplatform.googleapis.com:443'
+        )
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.MetadataServiceGrpcTransport, "grpc"),
+    (transports.MetadataServiceGrpcAsyncIOTransport,
+        "grpc_asyncio"),
+])
+def test_metadata_service_client_service_account_always_use_jwt(transport_class, transport_name):
+    # with_always_use_jwt_access must be invoked iff always_use_jwt_access=True.
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=True)
+        use_jwt.assert_called_once_with(True)
+
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=False)
+        use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("client_class,transport_name", [
+    (MetadataServiceClient, "grpc"),
+    (MetadataServiceAsyncClient, "grpc_asyncio"),
+])
+def test_metadata_service_client_from_service_account_file(client_class, transport_name):
+    # Both from_service_account_file and its json alias must wire the mocked
+    # credentials into the client's transport.
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == (
+            'aiplatform.googleapis.com:443'
+        )
+
+
+def test_metadata_service_client_get_transport_class():
+    # The default transport class is gRPC, and asking for "grpc" explicitly
+    # returns the same class.
+    transport = MetadataServiceClient.get_transport_class()
+    available_transports = [
+        transports.MetadataServiceGrpcTransport,
+    ]
+    assert transport in available_transports
+
+    transport = MetadataServiceClient.get_transport_class("grpc")
+    assert transport == transports.MetadataServiceGrpcTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +def test_metadata_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + MetadataServiceClient, MetadataServiceAsyncClient +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +def test_metadata_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", grpc_helpers), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_metadata_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MetadataServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", grpc_helpers), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_metadata_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateMetadataStoreRequest, + dict, +]) +def test_create_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + client.create_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_metadata_store_async_from_dict(): + await test_create_metadata_store_async(request_type=dict) + + +def test_create_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_store( + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_store + mock_val = gca_metadata_store.MetadataStore(name='name_value') + assert arg == mock_val + arg = args[0].metadata_store_id + mock_val = 'metadata_store_id_value' + assert arg == mock_val + + +def test_create_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_store( + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_store + mock_val = gca_metadata_store.MetadataStore(name='name_value') + assert arg == mock_val + arg = args[0].metadata_store_id + mock_val = 'metadata_store_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetMetadataStoreRequest, + dict, +]) +def test_get_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore( + name='name_value', + description='description_value', + ) + response = client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == 'name_value' + assert response.description == 'description_value' + + +def test_get_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + client.get_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore( + name='name_value', + description='description_value', + )) + response = await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == 'name_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_metadata_store_async_from_dict(): + await test_get_metadata_store_async(request_type=dict) + + +def test_get_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + call.return_value = metadata_store.MetadataStore() + client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) + await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListMetadataStoresRequest, + dict, +]) +def test_list_metadata_stores(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListMetadataStoresResponse( + next_page_token='next_page_token_value', + ) + response = client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataStoresPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_metadata_stores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + client.list_metadata_stores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + +@pytest.mark.asyncio +async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataStoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_from_dict(): + await test_list_metadata_stores_async(request_type=dict) + + +def test_list_metadata_stores_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = metadata_service.ListMetadataStoresResponse() + client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_metadata_stores_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) + await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_metadata_stores_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_stores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_metadata_stores_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_metadata_stores(
+            metadata_service.ListMetadataStoresRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListMetadataStoresResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_metadata_stores(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_metadata_stores(
+            metadata_service.ListMetadataStoresRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_metadata_stores_pager(transport_name: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; do not pass the class itself
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_metadata_stores(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, metadata_store.MetadataStore)
+                   for i in results)
+def test_list_metadata_stores_pages(transport_name: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; do not pass the class itself
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_metadata_stores(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_async_pager():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; do not pass the class itself
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_metadata_stores(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, metadata_store.MetadataStore)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_async_pages():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; do not pass the class itself
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_stores(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteMetadataStoreRequest, + dict, +]) +def test_delete_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + client.delete_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async_from_dict(): + await test_delete_metadata_store_async(request_type=dict) + + +def test_delete_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateArtifactRequest, + dict, +]) +def test_create_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + client.create_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + +@pytest.mark.asyncio +async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_artifact_async_from_dict(): + await test_create_artifact_async(request_type=dict) + + +def test_create_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + call.return_value = gca_artifact.Artifact() + client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_artifact( + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].artifact_id + mock_val = 'artifact_id_value' + assert arg == mock_val + + +def test_create_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + +@pytest.mark.asyncio +async def test_create_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_artifact( + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].artifact_id + mock_val = 'artifact_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetArtifactRequest, + dict, +]) +def test_get_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + client.get_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + +@pytest.mark.asyncio +async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_artifact_async_from_dict(): + await test_get_artifact_async(request_type=dict) + + +def test_get_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + call.return_value = artifact.Artifact() + client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_artifact_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_artifact(
+            metadata_service.GetArtifactRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_artifact_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_artifact),
+            '__call__') as call:
+        # Designate an appropriate (awaitable) return value for the call.
+        # The plain-message assignment that used to precede this one was dead
+        # code: it was overwritten before the mock was ever invoked.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            artifact.Artifact()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_artifact(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_artifact_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.get_artifact( + metadata_service.GetArtifactRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListArtifactsRequest, + dict, +]) +def test_list_artifacts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        client.list_artifacts()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListArtifactsRequest()
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        # (PEP 8: space after '='.)
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListArtifactsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListArtifactsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_from_dict():
+    await test_list_artifacts_async(request_type=dict)
+
+
+def test_list_artifacts_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = metadata_service.ListArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + call.return_value = metadata_service.ListArtifactsResponse() + client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) + await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_artifacts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListArtifactsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_artifacts(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_artifacts_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_artifacts(
+            metadata_service.ListArtifactsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_artifacts_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate (awaitable) return value for the call.
+        # The non-awaitable assignment that used to precede this one was
+        # dead code (overwritten before the mock was invoked).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            metadata_service.ListArtifactsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_artifacts(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_artifacts_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_artifacts(
+            metadata_service.ListArtifactsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_artifacts_pager(transport_name: str = "grpc"):
+    # NOTE(review): every other test constructs AnonymousCredentials(); the
+    # pager tests passed the class object itself. Instantiate for consistency.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_artifacts(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, artifact.Artifact)
+                   for i in results)
+def test_list_artifacts_pages(transport_name: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_artifacts(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_pager():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_artifacts(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, artifact.Artifact)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_pages():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_artifacts(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateArtifactRequest, + dict, +]) +def test_update_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + client.update_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + +@pytest.mark.asyncio +async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        # PEP 8: one space on each side of '='. The return value is wrapped in
+        # FakeUnaryUnaryCall so it is awaitable by the async client.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact(
+            name='name_value',
+            display_name='display_name_value',
+            uri='uri_value',
+            etag='etag_value',
+            state=gca_artifact.Artifact.State.PENDING,
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        ))
+        response = await client.update_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.UpdateArtifactRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_artifact.Artifact)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.uri == 'uri_value'
+    assert response.etag == 'etag_value'
+    assert response.state == gca_artifact.Artifact.State.PENDING
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_update_artifact_async_from_dict():
+    await test_update_artifact_async(request_type=dict)
+
+
+def test_update_artifact_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.UpdateArtifactRequest()
+
+    request.artifact.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_artifact),
+            '__call__') as call:
+        call.return_value = gca_artifact.Artifact()
+        client.update_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateArtifactRequest() + + request.artifact.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + await client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact.name=name_value', + ) in kw['metadata'] + + +def test_update_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.update_artifact(
+            artifact=gca_artifact.Artifact(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].artifact
+        mock_val = gca_artifact.Artifact(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_update_artifact_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_artifact(
+            metadata_service.UpdateArtifactRequest(),
+            artifact=gca_artifact.Artifact(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_update_artifact_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_artifact),
+            '__call__') as call:
+        # Designate an appropriate (awaitable) return value for the call.
+        # The non-awaitable assignment that used to precede this one was
+        # dead code (overwritten before the mock was invoked).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            gca_artifact.Artifact()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_artifact(
+            artifact=gca_artifact.Artifact(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_artifact( + metadata_service.UpdateArtifactRequest(), + artifact=gca_artifact.Artifact(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteArtifactRequest, + dict, +]) +def test_delete_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + client.delete_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + +@pytest.mark.asyncio +async def test_delete_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_artifact_async_from_dict(): + await test_delete_artifact_async(request_type=dict) + + +def test_delete_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_artifact( + metadata_service.DeleteArtifactRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        # Designate an appropriate (awaitable) return value for the call.
+        # The plain Operation assignment that used to precede this one was
+        # dead code (overwritten before the mock was invoked).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_artifact(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_artifact_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_artifact(
+            metadata_service.DeleteArtifactRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    metadata_service.PurgeArtifactsRequest,
+    dict,
+])
+def test_purge_artifacts(request_type, transport: str = 'grpc'):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.purge_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.purge_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + client.purge_artifacts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + +@pytest.mark.asyncio +async def test_purge_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeArtifactsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_artifacts_async_from_dict(): + await test_purge_artifacts_async(request_type=dict) + + +def test_purge_artifacts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_artifacts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_artifacts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_artifacts( + metadata_service.PurgeArtifactsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_artifacts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_artifacts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_artifacts( + metadata_service.PurgeArtifactsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateContextRequest, + dict, +]) +def test_create_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + client.create_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + +@pytest.mark.asyncio +async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_context_async_from_dict(): + await test_create_context_async(request_type=dict) + + +def test_create_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = gca_context.Context() + client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_context( + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].context_id + mock_val = 'context_id_value' + assert arg == mock_val + + +def test_create_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_context( + metadata_service.CreateContextRequest(), + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + +@pytest.mark.asyncio +async def test_create_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_context( + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].context_id + mock_val = 'context_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_context( + metadata_service.CreateContextRequest(), + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetContextRequest, + dict, +]) +def test_get_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + client.get_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + +@pytest.mark.asyncio +async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_context_async_from_dict(): + await test_get_context_async(request_type=dict) + + +def test_get_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + call.return_value = context.Context() + client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_context( + metadata_service.GetContextRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_context( + metadata_service.GetContextRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListContextsRequest, + dict, +]) +def test_list_contexts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + client.list_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + +@pytest.mark.asyncio +async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_contexts_async_from_dict(): + await test_list_contexts_async(request_type=dict) + + +def test_list_contexts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + call.return_value = metadata_service.ListContextsResponse() + client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) + await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_contexts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_contexts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_contexts( + metadata_service.ListContextsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_contexts( + metadata_service.ListContextsRequest(), + parent='parent_value', + ) + + +def test_list_contexts_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_contexts(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, context.Context) + for i in results) +def test_list_contexts_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + pages = list(client.list_contexts(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_contexts_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_contexts(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, context.Context) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_contexts_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_contexts(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateContextRequest, + dict, +]) +def test_update_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + client.update_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + +@pytest.mark.asyncio +async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_update_context_async_from_dict(): + await test_update_context_async(request_type=dict) + + +def test_update_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + call.return_value = gca_context.Context() + client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=name_value', + ) in kw['metadata'] + + +def test_update_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_context( + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_context( + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteContextRequest, + dict, +]) +def test_delete_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + client.delete_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + +@pytest.mark.asyncio +async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_context_async_from_dict(): + await test_delete_context_async(request_type=dict) + + +def test_delete_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.DeleteContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_context( + metadata_service.DeleteContextRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_context( + metadata_service.DeleteContextRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.PurgeContextsRequest, + dict, +]) +def test_purge_contexts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + client.purge_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + +@pytest.mark.asyncio +async def test_purge_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeContextsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_contexts_async_from_dict(): + await test_purge_contexts_async(request_type=dict) + + +def test_purge_contexts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_contexts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_contexts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_contexts( + metadata_service.PurgeContextsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_contexts( + metadata_service.PurgeContextsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddContextArtifactsAndExecutionsRequest, + dict, +]) +def test_add_context_artifacts_and_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse( + ) + response = client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) + + +def test_add_context_artifacts_and_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + client.add_context_artifacts_and_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse( + )) + response = await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async_from_dict(): + await test_add_context_artifacts_and_executions_async(request_type=dict) + + +def test_add_context_artifacts_and_executions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) + await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_add_context_artifacts_and_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_artifacts_and_executions( + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].artifacts + mock_val = ['artifacts_value'] + assert arg == mock_val + arg = args[0].executions + mock_val = ['executions_value'] + assert arg == mock_val + + +def test_add_context_artifacts_and_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_artifacts_and_executions( + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].artifacts + mock_val = ['artifacts_value'] + assert arg == mock_val + arg = args[0].executions + mock_val = ['executions_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddContextChildrenRequest, + dict, +]) +def test_add_context_children(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse( + ) + response = client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +def test_add_context_children_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + client.add_context_children() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + +@pytest.mark.asyncio +async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse( + )) + response = await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +@pytest.mark.asyncio +async def test_add_context_children_async_from_dict(): + await test_add_context_children_async(request_type=dict) + + +def test_add_context_children_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = metadata_service.AddContextChildrenResponse() + client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_context_children_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) + await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_add_context_children_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_children( + context='context_value', + child_contexts=['child_contexts_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].child_contexts + mock_val = ['child_contexts_value'] + assert arg == mock_val + + +def test_add_context_children_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context='context_value', + child_contexts=['child_contexts_value'], + ) + +@pytest.mark.asyncio +async def test_add_context_children_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_children( + context='context_value', + child_contexts=['child_contexts_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].child_contexts + mock_val = ['child_contexts_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_context_children_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context='context_value', + child_contexts=['child_contexts_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.QueryContextLineageSubgraphRequest, + dict, +]) +def test_query_context_lineage_subgraph(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph( + ) + response = client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_context_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + client.query_context_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph(
+        ))
+        response = await client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_async_from_dict():
+    await test_query_context_lineage_subgraph_async(request_type=dict)
+
+
+def test_query_context_lineage_subgraph_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryContextLineageSubgraphRequest()
+
+    request.context = 'context_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'context=context_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = metadata_service.QueryContextLineageSubgraphRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_query_context_lineage_subgraph_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_context_lineage_subgraph( + context='context_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + + +def test_query_context_lineage_subgraph_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.query_context_lineage_subgraph(
+            metadata_service.QueryContextLineageSubgraphRequest(),
+            context='context_value',
+        )
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call. The message
+        # is wrapped in FakeUnaryUnaryCall so the async client can await it.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_context_lineage_subgraph(
+            context='context_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].context
+        mock_val = 'context_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context='context_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateExecutionRequest, + dict, +]) +def test_create_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + client.create_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + +@pytest.mark.asyncio +async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_execution_async_from_dict(): + await test_create_execution_async(request_type=dict) + + +def test_create_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + call.return_value = gca_execution.Execution() + client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_execution( + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].execution_id + mock_val = 'execution_id_value' + assert arg == mock_val + + +def test_create_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_execution(
+            metadata_service.CreateExecutionRequest(),
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call. The message
+        # is wrapped in FakeUnaryUnaryCall so the async client can await it.
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_execution(
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].execution
+        mock_val = gca_execution.Execution(name='name_value')
+        assert arg == mock_val
+        arg = args[0].execution_id
+        mock_val = 'execution_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_execution( + metadata_service.CreateExecutionRequest(), + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetExecutionRequest, + dict, +]) +def test_get_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution( + name='name_value', + display_name='display_name_value', + state=execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + client.get_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + +@pytest.mark.asyncio +async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution(
+ name='name_value',
+ display_name='display_name_value',
+ state=execution.Execution.State.NEW,
+ etag='etag_value',
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.get_execution(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.GetExecutionRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, execution.Execution)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == execution.Execution.State.NEW
+ assert response.etag == 'etag_value'
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_execution_async_from_dict():
+ await test_get_execution_async(request_type=dict)
+
+
+def test_get_execution_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.GetExecutionRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_execution),
+ '__call__') as call:
+ call.return_value = execution.Execution()
+ client.get_execution(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_execution( + metadata_service.GetExecutionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_execution( + metadata_service.GetExecutionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListExecutionsRequest, + dict, +]) +def test_list_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.list_executions),
+ '__call__') as call:
+ client.list_executions()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListExecutionsRequest()
+
+@pytest.mark.asyncio
+async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_executions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_executions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListExecutionsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListExecutionsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_executions_async_from_dict():
+ await test_list_executions_async(request_type=dict)
+
+
+def test_list_executions_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.ListExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + call.return_value = metadata_service.ListExecutionsResponse() + client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) + await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_executions( + metadata_service.ListExecutionsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_executions( + metadata_service.ListExecutionsRequest(), + parent='parent_value', + ) + + +def test_list_executions_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_executions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, execution.Execution) + for i in results) +def test_list_executions_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # 
Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = list(client.list_executions(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_executions_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_executions(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, execution.Execution) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_executions_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_executions(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateExecutionRequest, + dict, +]) +def test_update_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + client.update_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + +@pytest.mark.asyncio +async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution(
+ name='name_value',
+ display_name='display_name_value',
+ state=gca_execution.Execution.State.NEW,
+ etag='etag_value',
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.update_execution(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.UpdateExecutionRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_execution.Execution)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == gca_execution.Execution.State.NEW
+ assert response.etag == 'etag_value'
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_update_execution_async_from_dict():
+ await test_update_execution_async(request_type=dict)
+
+
+def test_update_execution_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.UpdateExecutionRequest()
+
+ request.execution.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_execution),
+ '__call__') as call:
+ call.return_value = gca_execution.Execution()
+ client.update_execution(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution.name=name_value', + ) in kw['metadata'] + + +def test_update_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_execution( + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_execution( + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteExecutionRequest, + dict, +]) +def test_delete_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + client.delete_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + +@pytest.mark.asyncio +async def test_delete_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_execution_async_from_dict(): + await test_delete_execution_async(request_type=dict) + + +def test_delete_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_execution( + metadata_service.DeleteExecutionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_execution( + metadata_service.DeleteExecutionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.PurgeExecutionsRequest, + dict, +]) +def test_purge_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + client.purge_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + +@pytest.mark.asyncio +async def test_purge_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeExecutionsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_executions_async_from_dict(): + await test_purge_executions_async(request_type=dict) + + +def test_purge_executions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_executions( + metadata_service.PurgeExecutionsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_executions( + metadata_service.PurgeExecutionsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddExecutionEventsRequest, + dict, +]) +def test_add_execution_events(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse( + ) + response = client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +def test_add_execution_events_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + client.add_execution_events() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + +@pytest.mark.asyncio +async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse( + )) + response = await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +@pytest.mark.asyncio +async def test_add_execution_events_async_from_dict(): + await test_add_execution_events_async(request_type=dict) + + +def test_add_execution_events_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = metadata_service.AddExecutionEventsResponse() + client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_execution_events_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) + await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +def test_add_execution_events_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_execution_events( + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + arg = args[0].events + mock_val = [event.Event(artifact='artifact_value')] + assert arg == mock_val + + +def test_add_execution_events_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_execution_events( + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + arg = args[0].events + mock_val = [event.Event(artifact='artifact_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.QueryExecutionInputsAndOutputsRequest, + dict, +]) +def test_query_execution_inputs_and_outputs(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph( + ) + response = client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_execution_inputs_and_outputs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + client.query_execution_inputs_and_outputs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( + )) + response = await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async_from_dict(): + await test_query_execution_inputs_and_outputs_async(request_type=dict) + + +def test_query_execution_inputs_and_outputs_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +def test_query_execution_inputs_and_outputs_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_execution_inputs_and_outputs( + execution='execution_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + + +def test_query_execution_inputs_and_outputs_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution='execution_value', + ) + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_execution_inputs_and_outputs( + execution='execution_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution='execution_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateMetadataSchemaRequest, + dict, +]) +def test_create_metadata_schema(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + ) + response = client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +def test_create_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + client.create_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + +@pytest.mark.asyncio +async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + )) + response = await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async_from_dict(): + await test_create_metadata_schema_async(request_type=dict) + + +def test_create_metadata_schema_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = gca_metadata_schema.MetadataSchema() + client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) + await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_schema( + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_schema + mock_val = gca_metadata_schema.MetadataSchema(name='name_value') + assert arg == mock_val + arg = args[0].metadata_schema_id + mock_val = 'metadata_schema_id_value' + assert arg == mock_val + + +def test_create_metadata_schema_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_schema( + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_schema + mock_val = gca_metadata_schema.MetadataSchema(name='name_value') + assert arg == mock_val + arg = args[0].metadata_schema_id + mock_val = 'metadata_schema_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetMetadataSchemaRequest, + dict, +]) +def test_get_metadata_schema(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + ) + response = client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +def test_get_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + client.get_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + +@pytest.mark.asyncio +async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + )) + response = await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async_from_dict(): + await test_get_metadata_schema_async(request_type=dict) + + +def test_get_metadata_schema_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + call.return_value = metadata_schema.MetadataSchema() + client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) + await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_schema( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_metadata_schema_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_schema( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListMetadataSchemasRequest, + dict, +]) +def test_list_metadata_schemas(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse( + next_page_token='next_page_token_value', + ) + response = client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_metadata_schemas_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + client.list_metadata_schemas() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_from_dict(): + await test_list_metadata_schemas_async(request_type=dict) + + +def test_list_metadata_schemas_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListMetadataSchemasRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + call.return_value = metadata_service.ListMetadataSchemasResponse() + client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataSchemasRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) + await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_metadata_schemas_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_schemas( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_metadata_schemas_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_schemas( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), + parent='parent_value', + ) + + +def test_list_metadata_schemas_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], + next_page_token='def', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_metadata_schemas(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) + for i in results) +def 
test_list_metadata_schemas_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], + next_page_token='def', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_schemas(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], + next_page_token='def', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_schemas(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], + next_page_token='def', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_schemas(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.QueryArtifactLineageSubgraphRequest, + dict, +]) +def test_query_artifact_lineage_subgraph(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph( + ) + response = client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_artifact_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + client.query_artifact_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( + )) + response = await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_async_from_dict(): + await test_query_artifact_lineage_subgraph_async(request_type=dict) + + +def test_query_artifact_lineage_subgraph_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + + request.artifact = 'artifact_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact=artifact_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryArtifactLineageSubgraphRequest() + + request.artifact = 'artifact_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + await client.query_artifact_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact=artifact_value', + ) in kw['metadata'] + + +def test_query_artifact_lineage_subgraph_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_artifact_lineage_subgraph( + artifact='artifact_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].artifact + mock_val = 'artifact_value' + assert arg == mock_val + + +def test_query_artifact_lineage_subgraph_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact='artifact_value', + ) + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_artifact_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_artifact_lineage_subgraph( + artifact='artifact_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].artifact + mock_val = 'artifact_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_query_artifact_lineage_subgraph_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact='artifact_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MetadataServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetadataServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = MetadataServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.MetadataServiceGrpcTransport, + ) + +def test_metadata_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_metadata_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_metadata_store', + 'get_metadata_store', + 'list_metadata_stores', + 'delete_metadata_store', + 'create_artifact', + 'get_artifact', + 'list_artifacts', + 'update_artifact', + 'delete_artifact', + 'purge_artifacts', + 'create_context', + 'get_context', + 'list_contexts', + 'update_context', + 'delete_context', + 'purge_contexts', + 'add_context_artifacts_and_executions', + 'add_context_children', + 'query_context_lineage_subgraph', + 'create_execution', + 'get_execution', + 'list_executions', + 'update_execution', + 'delete_execution', + 'purge_executions', + 'add_execution_events', + 'query_execution_inputs_and_outputs', + 'create_metadata_schema', + 'get_metadata_schema', + 'list_metadata_schemas', + 'query_artifact_lineage_subgraph', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_metadata_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + 
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_metadata_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MetadataServiceTransport() + adc.assert_called_once() + + +def test_metadata_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MetadataServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, + ], +) +def test_metadata_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.MetadataServiceGrpcTransport, grpc_helpers), + (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_metadata_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) +def test_metadata_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_metadata_service_host_no_port(transport_name): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_metadata_service_host_with_port(transport_name): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_metadata_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MetadataServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_metadata_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MetadataServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) +def test_metadata_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) +def test_metadata_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_metadata_service_grpc_lro_client(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = 
client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_metadata_service_grpc_lro_async_client(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_artifact_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = MetadataServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_artifact_path(path) + assert expected == actual + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + actual = MetadataServiceClient.context_path(project, location, metadata_store, context) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = MetadataServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_context_path(path) + assert expected == actual + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = MetadataServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_execution_path(path) + assert expected == actual + +def test_metadata_schema_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + metadata_schema = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) + actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema) + assert expected == actual + + +def test_parse_metadata_schema_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "metadata_schema": "mussel", + } + path = MetadataServiceClient.metadata_schema_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_metadata_schema_path(path) + assert expected == actual + +def test_metadata_store_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) + actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store) + assert expected == actual + + +def test_parse_metadata_store_path(): + expected = { + "project": "abalone", + "location": "squid", + "metadata_store": "clam", + } + path = MetadataServiceClient.metadata_store_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_metadata_store_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MetadataServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = MetadataServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format(folder=folder, ) + actual = MetadataServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = MetadataServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MetadataServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = MetadataServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format(project=project, ) + actual = MetadataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = MetadataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MetadataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = MetadataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MetadataServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+# Renamed from test_delete_operation: the async variant previously redefined
+# the sync test of the same name, so the sync test was never collected.
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+# Renamed from test_cancel_operation: the async variant previously redefined
+# the sync test of the same name, so the sync test was never collected.
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py new file mode 100644 index 0000000000..1602df6ef9 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -0,0 +1,3321 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient +from google.cloud.aiplatform_v1.services.migration_service import pagers +from google.cloud.aiplatform_v1.services.migration_service import transports +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MigrationServiceClient._get_default_mtls_endpoint(None) is None + assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), +]) +def test_migration_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MigrationServiceGrpcTransport, "grpc"), + (transports.MigrationServiceGrpcAsyncIOTransport, 
"grpc_asyncio"), +]) +def test_migration_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), +]) +def test_migration_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_migration_service_client_get_transport_class(): + transport = MigrationServiceClient.get_transport_class() + available_transports = [ + transports.MigrationServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MigrationServiceClient.get_transport_class("grpc") + assert transport == transports.MigrationServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +def test_migration_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, MigrationServiceAsyncClient +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +def test_migration_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", grpc_helpers), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_migration_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MigrationServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", grpc_helpers), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_migration_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + migration_service.SearchMigratableResourcesRequest, + dict, +]) +def test_search_migratable_resources(request_type, transport: str = 'grpc'): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_migratable_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + client.search_migratable_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + +@pytest.mark.asyncio +async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + )) + response = await client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_from_dict(): + await test_search_migratable_resources_async(request_type=dict) + + +def test_search_migratable_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.SearchMigratableResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + call.return_value = migration_service.SearchMigratableResourcesResponse() + client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_migratable_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.SearchMigratableResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) + await client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_search_migratable_resources_flattened(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_migratable_resources( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_search_migratable_resources_flattened_error(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_migratable_resources( + migration_service.SearchMigratableResourcesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_search_migratable_resources_flattened_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_migratable_resources( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_search_migratable_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.search_migratable_resources( + migration_service.SearchMigratableResourcesRequest(), + parent='parent_value', + ) + + +def test_search_migratable_resources_pager(transport_name: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.search_migratable_resources(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, migratable_resource.MigratableResource) + for i in results) +def test_search_migratable_resources_pages(transport_name: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + pages = list(client.search_migratable_resources(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_pager(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_migratable_resources(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, migratable_resource.MigratableResource) + for i in responses) + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_pages(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_migratable_resources(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + migration_service.BatchMigrateResourcesRequest, + dict, +]) +def test_batch_migrate_resources(request_type, transport: str = 'grpc'): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_migrate_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + client.batch_migrate_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async_from_dict(): + await test_batch_migrate_resources_async(request_type=dict) + + +def test_batch_migrate_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_migrate_resources_flattened(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].migrate_resource_requests + mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + assert arg == mock_val + + +def test_batch_migrate_resources_flattened_error(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].migrate_resource_requests + mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MigrationServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MigrationServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = MigrationServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.MigrationServiceGrpcTransport, + ) + +def test_migration_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_migration_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'search_migratable_resources', + 'batch_migrate_resources', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_migration_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def 
test_migration_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport() + adc.assert_called_once() + + +def test_migration_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MigrationServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, + ], +) +def test_migration_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.MigrationServiceGrpcTransport, grpc_helpers), + (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_migration_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_migration_service_host_no_port(transport_name): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_migration_service_host_with_port(transport_name): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_migration_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MigrationServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_migration_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MigrationServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_migration_service_grpc_lro_client(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport 
= client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_migration_service_grpc_lro_async_client(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotated_dataset_path(): + project = "squid" + dataset = "clam" + annotated_dataset = "whelk" + expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) + actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) + assert expected == actual + + +def test_parse_annotated_dataset_path(): + expected = { + "project": "octopus", + "dataset": "oyster", + "annotated_dataset": "nudibranch", + } + path = MigrationServiceClient.annotated_dataset_path(**expected) + + # Check that the path construction is reversible. 
    actual = MigrationServiceClient.parse_annotated_dataset_path(path)
    assert expected == actual

def test_dataset_path():
    project = "cuttlefish"
    location = "mussel"
    dataset = "winkle"
    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
    actual = MigrationServiceClient.dataset_path(project, location, dataset)
    assert expected == actual


def test_parse_dataset_path():
    expected = {
        "project": "nautilus",
        "location": "scallop",
        "dataset": "abalone",
    }
    path = MigrationServiceClient.dataset_path(**expected)

    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_dataset_path(path)
    assert expected == actual

# NOTE(review): duplicate name — this definition shadows the ``test_dataset_path``
# above, so pytest only collects the last one. The duplication mirrors the
# generator emitting one path helper per resource pattern under the same name;
# confirm with the generator before renaming or deleting.
def test_dataset_path():
    project = "squid"
    location = "clam"
    dataset = "whelk"
    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
    actual = MigrationServiceClient.dataset_path(project, location, dataset)
    assert expected == actual


def test_parse_dataset_path():
    expected = {
        "project": "octopus",
        "location": "oyster",
        "dataset": "nudibranch",
    }
    path = MigrationServiceClient.dataset_path(**expected)

    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_dataset_path(path)
    assert expected == actual

# NOTE(review): third same-named definition (shadows both above); this variant
# covers the legacy ``projects/{project}/datasets/{dataset}`` pattern and is the
# only ``test_dataset_path`` that actually runs.
def test_dataset_path():
    project = "cuttlefish"
    dataset = "mussel"
    expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )
    actual = MigrationServiceClient.dataset_path(project, dataset)
    assert expected == actual


def test_parse_dataset_path():
    expected = {
        "project": "winkle",
        "dataset": "nautilus",
    }
    path = MigrationServiceClient.dataset_path(**expected)

    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_dataset_path(path)
    assert expected == actual

def test_model_path():
    project = "scallop"
    location = "abalone"
    model = "squid"
    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
    actual = MigrationServiceClient.model_path(project, location, model)
    assert expected == actual


def test_parse_model_path():
    expected = {
        "project": "clam",
        "location": "whelk",
        "model": "octopus",
    }
    path = MigrationServiceClient.model_path(**expected)

    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_model_path(path)
    assert expected == actual

# NOTE(review): duplicate name — shadows the ``test_model_path`` above, so only
# this pair runs under pytest. Mirrors the generator's duplicated ``model_path``
# helpers; confirm upstream before renaming or deleting.
def test_model_path():
    project = "oyster"
    location = "nudibranch"
    model = "cuttlefish"
    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
    actual = MigrationServiceClient.model_path(project, location, model)
    assert expected == actual


def test_parse_model_path():
    expected = {
        "project": "mussel",
        "location": "winkle",
        "model": "nautilus",
    }
    path = MigrationServiceClient.model_path(**expected)

    # Check that the path construction is reversible.
    actual = MigrationServiceClient.parse_model_path(path)
    assert expected == actual

def test_version_path():
    project = "scallop"
    model = "abalone"
    version = "squid"
    expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )
    actual = MigrationServiceClient.version_path(project, model, version)
    assert expected == actual


def test_parse_version_path():
    expected = {
        "project": "clam",
        "model": "whelk",
        "version": "octopus",
    }
    path = MigrationServiceClient.version_path(**expected)

    # Check that the path construction is reversible.
+ actual = MigrationServiceClient.parse_version_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MigrationServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = MigrationServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = MigrationServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = MigrationServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MigrationServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = MigrationServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = MigrationServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = MigrationServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MigrationServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = MigrationServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MigrationServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+def test_set_iam_policy_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py new file mode 100644 index 0000000000..b74642c303 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -0,0 +1,6884 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient +from google.cloud.aiplatform_v1.services.model_service import ModelServiceClient +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.services.model_service import transports +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import explanation_metadata +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice 
+from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ModelServiceClient, "grpc"), + 
(ModelServiceAsyncClient, "grpc_asyncio"), +]) +def test_model_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ModelServiceGrpcTransport, "grpc"), + (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), +]) +def test_model_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert 
isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + ] + assert transport in available_transports + + transport = ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, ModelServiceAsyncClient +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", grpc_helpers), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_model_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", grpc_helpers), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_model_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.UploadModelRequest, + dict, +]) +def test_upload_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upload_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + client.upload_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + +@pytest.mark.asyncio +async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upload_model_async_from_dict(): + await test_upload_model_async(request_type=dict) + + +def test_upload_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_upload_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_upload_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + + +def test_upload_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_upload_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_upload_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelRequest, + dict, +]) +def test_get_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelsRequest, + dict, +]) +def test_list_models(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = model_service.ListModelsResponse() + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) + await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_models_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_models( + model_service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_models_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_models(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelVersionsRequest, + dict, +]) +def test_list_model_versions(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelVersionsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_versions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + client.list_model_versions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + +@pytest.mark.asyncio +async def test_list_model_versions_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelVersionsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelVersionsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_versions_async_from_dict(): + await test_list_model_versions_async(request_type=dict) + + +def test_list_model_versions_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + call.return_value = model_service.ListModelVersionsResponse() + client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_versions_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse()) + await client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_list_model_versions_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_versions( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_list_model_versions_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_versions( + model_service.ListModelVersionsRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_service.ListModelVersionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_versions( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_versions( + model_service.ListModelVersionsRequest(), + name='name_value', + ) + + +def test_list_model_versions_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('name', ''), + )), + ) + pager = client.list_model_versions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_model_versions_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_versions(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_versions_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_versions(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_versions_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_versions(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.UpdateModelRequest, + dict, +]) +def test_update_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty 
calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + client.update_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + +@pytest.mark.asyncio +async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_model_async_from_dict(): + await test_update_model_async(request_type=dict) + + +def test_update_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = gca_model.Model() + client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +def test_update_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.DeleteModelRequest, + dict, +]) +def test_delete_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + model_service.DeleteModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model( + model_service.DeleteModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.DeleteModelVersionRequest, + dict, +]) +def test_delete_model_version(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_version_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + client.delete_model_version() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + +@pytest.mark.asyncio +async def test_delete_model_version_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelVersionRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_version_async_from_dict(): + await test_delete_model_version_async(request_type=dict) + + +def test_delete_model_version_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.DeleteModelVersionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_version_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelVersionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_version_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_version( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_version_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_version_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_version( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_version_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.MergeVersionAliasesRequest, + dict, +]) +def test_merge_version_aliases(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_merge_version_aliases_empty_call(): + # This test is a coverage failsafe to make sure that 
totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + client.merge_version_aliases() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + +@pytest.mark.asyncio +async def test_merge_version_aliases_async(transport: str = 'grpc_asyncio', request_type=model_service.MergeVersionAliasesRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_merge_version_aliases_async_from_dict(): + await test_merge_version_aliases_async(request_type=dict) + + +def test_merge_version_aliases_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.MergeVersionAliasesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + call.return_value = model.Model() + client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_merge_version_aliases_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.MergeVersionAliasesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_merge_version_aliases_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.merge_version_aliases( + name='name_value', + version_aliases=['version_aliases_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].version_aliases + mock_val = ['version_aliases_value'] + assert arg == mock_val + + +def test_merge_version_aliases_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), + name='name_value', + version_aliases=['version_aliases_value'], + ) + +@pytest.mark.asyncio +async def test_merge_version_aliases_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.merge_version_aliases( + name='name_value', + version_aliases=['version_aliases_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].version_aliases + mock_val = ['version_aliases_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_merge_version_aliases_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), + name='name_value', + version_aliases=['version_aliases_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ExportModelRequest, + dict, +]) +def test_export_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_model_async_from_dict(): + await test_export_model_async(request_type=dict) + + +def test_export_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_model( + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') + assert arg == mock_val + + +def test_export_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + model_service.ExportModelRequest(), + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + +@pytest.mark.asyncio +async def test_export_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.export_model( + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_model( + model_service.ExportModelRequest(), + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ImportModelEvaluationRequest, + dict, +]) +def test_import_model_evaluation(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + data_item_schema_uri='data_item_schema_uri_value', + annotation_schema_uri='annotation_schema_uri_value', + ) + response = client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + assert response.data_item_schema_uri == 'data_item_schema_uri_value' + assert response.annotation_schema_uri == 'annotation_schema_uri_value' + + +def test_import_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + client.import_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_import_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.ImportModelEvaluationRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + data_item_schema_uri='data_item_schema_uri_value', + annotation_schema_uri='annotation_schema_uri_value', + )) + response = await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + assert response.data_item_schema_uri == 'data_item_schema_uri_value' + assert response.annotation_schema_uri == 'annotation_schema_uri_value' + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async_from_dict(): + await test_import_model_evaluation_async(request_type=dict) + + +def test_import_model_evaluation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + call.return_value = gca_model_evaluation.ModelEvaluation() + client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.ImportModelEvaluationRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation()) + await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_import_model_evaluation_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_model_evaluation( + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name='name_value') + assert arg == mock_val + + +def test_import_model_evaluation_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_model_evaluation( + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + data_item_schema_uri='data_item_schema_uri_value', + annotation_schema_uri='annotation_schema_uri_value', + ) + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + assert response.data_item_schema_uri == 'data_item_schema_uri_value' + assert response.annotation_schema_uri == 'annotation_schema_uri_value' + + +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + data_item_schema_uri='data_item_schema_uri_value', + annotation_schema_uri='annotation_schema_uri_value', + )) + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + assert response.data_item_schema_uri == 'data_item_schema_uri_value' + assert response.annotation_schema_uri == 'annotation_schema_uri_value' + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async_from_dict(): + await test_get_model_evaluation_async(request_type=dict) + + +def test_get_model_evaluation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = model_evaluation.ModelEvaluation() + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation( + model_service.GetModelEvaluationRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_evaluation( + model_service.GetModelEvaluationRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelEvaluationsRequest, + dict, +]) +def test_list_model_evaluations(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_from_dict(): + await test_list_model_evaluations_async(request_type=dict) + + +def test_list_model_evaluations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = model_service.ListModelEvaluationsResponse() + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_evaluations_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_evaluations_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + model_service.ListModelEvaluationsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_evaluations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_evaluations( + model_service.ListModelEvaluationsRequest(), + parent='parent_value', + ) + + +def test_list_model_evaluations_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_model_evaluations(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in results) +def 
test_list_model_evaluations_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluations(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluations(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelEvaluationSliceRequest, + dict, +]) +def test_get_model_evaluation_slice(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + ) + response = client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +def test_get_model_evaluation_slice_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + client.get_model_evaluation_slice() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + )) + response = await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async_from_dict(): + await test_get_model_evaluation_slice_async(request_type=dict) + + +def test_get_model_evaluation_slice_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_slice_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_slice_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelEvaluationSlicesRequest, + dict, +]) +def test_list_model_evaluation_slices(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationSlicesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluation_slices_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + client.list_model_evaluation_slices() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_from_dict(): + await test_list_model_evaluation_slices_async(request_type=dict) + + +def test_list_model_evaluation_slices_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = model_service.ListModelEvaluationSlicesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + call.return_value = model_service.ListModelEvaluationSlicesResponse() + client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationSlicesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) + await client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_evaluation_slices_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluation_slices( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_evaluation_slices_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluation_slices( + model_service.ListModelEvaluationSlicesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = model_service.ListModelEvaluationSlicesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluation_slices(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluation_slices(
+            model_service.ListModelEvaluationSlicesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_evaluation_slices_pager(transport_name: str = "grpc"):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_evaluation_slices(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice)
+                   for i in results)
+def test_list_model_evaluation_slices_pages(transport_name: str = "grpc"):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluation_slices(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluation_slices(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluation_slices(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'upload_model', + 'get_model', + 'list_models', + 'list_model_versions', + 'update_model', + 'delete_model', + 'delete_model_version', + 'merge_version_aliases', + 'export_model', + 'import_model_evaluation', + 'get_model_evaluation', + 'list_model_evaluations', + 'get_model_evaluation_slice', + 'list_model_evaluation_slices', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_model_service_host_no_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_model_service_host_with_port(transport_name): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_model_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_model_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ModelServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_service_grpc_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_service_grpc_lro_async_client(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = ModelServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = ModelServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = ModelServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "squid" + location = "clam" + model = "whelk" + evaluation = "octopus" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "model": "cuttlefish", + "evaluation": "mussel", + } + path = ModelServiceClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_model_evaluation_slice_path(): + project = "winkle" + location = "nautilus" + model = "scallop" + evaluation = "abalone" + slice = "squid" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) + assert expected == actual + + +def test_parse_model_evaluation_slice_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + "evaluation": "oyster", + "slice": "nudibranch", + } + path = ModelServiceClient.model_evaluation_slice_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_evaluation_slice_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "cuttlefish" + location = "mussel" + training_pipeline = "winkle" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "training_pipeline": "abalone", + } + path = ModelServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = ModelServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert response is None +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_wait_operation` — the duplicate name
+    # redefined (shadowed) the sync test above, so pytest never ran it.
+    # Also use the async transport, matching the async client under test.
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_get_operation` — the duplicate name
+    # shadowed the sync test above so it never ran; async transport fixed too.
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_list_operations` — the duplicate name
+    # shadowed the sync test above so it never ran; async transport fixed too.
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_list_locations` — the duplicate name
+    # shadowed the sync test above so it never ran; async transport fixed too.
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # NOTE(review): was patching `list_locations` while calling `get_location`,
+    # which let the real get_location stub run and asserted the wrong mock.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # NOTE(review): patch the method under test (`get_location`), and use the
+    # same "locations/abc" name as every other get_location test.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py new file mode 100644 index 0000000000..7d4126f8a6 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -0,0 +1,5529 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient +from google.cloud.aiplatform_v1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1.services.pipeline_service import transports +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import execution +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import explanation_metadata +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import operation as gca_operation +from 
google.cloud.aiplatform_v1.types import pipeline_failure_policy +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.aiplatform_v1.types import value +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PipelineServiceClient._get_default_mtls_endpoint(None) is None + assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), +]) +def test_pipeline_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PipelineServiceGrpcTransport, "grpc"), + (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_service_account_always_use_jwt(transport_class, transport_name): + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), +]) +def test_pipeline_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_pipeline_service_client_get_transport_class(): + transport = PipelineServiceClient.get_transport_class() + available_transports = [ + transports.PipelineServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PipelineServiceClient.get_transport_class("grpc") + assert transport == transports.PipelineServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, 
transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, PipelineServiceAsyncClient +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", grpc_helpers), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_pipeline_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PipelineServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", grpc_helpers), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_pipeline_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CreateTrainingPipelineRequest, + dict, +]) +def test_create_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_create_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        client.create_training_pipeline()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline(
+            name='name_value',
+            display_name='display_name_value',
+            training_task_definition='training_task_definition_value',
+            model_id='model_id_value',
+            parent_model='parent_model_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+        ))
+        response = await client.create_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_training_pipeline_async_from_dict(): + await test_create_training_pipeline_async(request_type=dict) + + +def test_create_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = gca_training_pipeline.TrainingPipeline() + client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) + await client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_training_pipeline( + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].training_pipeline
+        mock_val = gca_training_pipeline.TrainingPipeline(name='name_value')
+        assert arg == mock_val
+
+
+def test_create_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_training_pipeline(
+            pipeline_service.CreateTrainingPipelineRequest(),
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate (awaitable) return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_training_pipeline(
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].training_pipeline + mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_training_pipeline( + pipeline_service.CreateTrainingPipelineRequest(), + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.GetTrainingPipelineRequest, + dict, +]) +def test_get_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_get_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + client.get_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline(
+            name='name_value',
+            display_name='display_name_value',
+            training_task_definition='training_task_definition_value',
+            model_id='model_id_value',
+            parent_model='parent_model_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+        ))
+        response = await client.get_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, training_pipeline.TrainingPipeline)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.training_task_definition == 'training_task_definition_value'
+    assert response.model_id == 'model_id_value'
+    assert response.parent_model == 'parent_model_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_async_from_dict():
+    await test_get_training_pipeline_async(request_type=dict)
+
+
+def test_get_training_pipeline_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.GetTrainingPipelineRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_training_pipeline),
+            '__call__') as call:
+        call.return_value = training_pipeline.TrainingPipeline()
+        client.get_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.ListTrainingPipelinesRequest, + dict, +]) +def test_list_training_pipelines(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrainingPipelinesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_training_pipelines_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + client.list_training_pipelines() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + +@pytest.mark.asyncio +async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_from_dict(): + await test_list_training_pipelines_async(request_type=dict) + + +def test_list_training_pipelines_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = pipeline_service.ListTrainingPipelinesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_training_pipelines_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListTrainingPipelinesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) + await client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_training_pipelines_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_training_pipelines( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_training_pipelines_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_training_pipelines( + pipeline_service.ListTrainingPipelinesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_training_pipelines_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_training_pipelines( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_training_pipelines(
+            pipeline_service.ListTrainingPipelinesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_training_pipelines_pager(transport_name: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_training_pipelines(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i,
training_pipeline.TrainingPipeline) + for i in results) +def test_list_training_pipelines_pages(transport_name: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + pages = list(client.list_training_pipelines(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_pager(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_training_pipelines(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, training_pipeline.TrainingPipeline) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_pages(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_training_pipelines(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + pipeline_service.DeleteTrainingPipelineRequest, + dict, +]) +def test_delete_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + client.delete_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async_from_dict(): + await test_delete_training_pipeline_async(request_type=dict) + + +def test_delete_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_training_pipeline( + pipeline_service.DeleteTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_training_pipeline( + pipeline_service.DeleteTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CancelTrainingPipelineRequest, + dict, +]) +def test_cancel_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + client.cancel_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async_from_dict(): + await test_cancel_training_pipeline_async(request_type=dict) + + +def test_cancel_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = None + client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_training_pipeline( + pipeline_service.CancelTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_training_pipeline( + pipeline_service.CancelTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CreatePipelineJobRequest, + dict, +]) +def test_create_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + ) + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + +@pytest.mark.asyncio +async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + )) + response = await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = gca_pipeline_job.PipelineJob() + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].pipeline_job + mock_val = gca_pipeline_job.PipelineJob(name='name_value') + assert arg == mock_val + arg = args[0].pipeline_job_id + mock_val = 'pipeline_job_id_value' + assert arg == mock_val + + +def test_create_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_pipeline_job( + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].pipeline_job + mock_val = gca_pipeline_job.PipelineJob(name='name_value') + assert arg == mock_val + arg = args[0].pipeline_job_id + mock_val = 'pipeline_job_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.GetPipelineJobRequest, + dict, +]) +def test_get_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + ) + response = client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +def test_get_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + client.get_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + +@pytest.mark.asyncio +async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + )) + response = await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async_from_dict(): + await test_get_pipeline_job_async(request_type=dict) + + +def test_get_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + call.return_value = pipeline_job.PipelineJob() + client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) + await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.ListPipelineJobsRequest, + dict, +]) +def test_list_pipeline_jobs(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_pipeline_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + client.list_pipeline_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value.
+ request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), + parent='parent_value', + ) + + +def test_list_pipeline_jobs_pager(transport_name: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_pipeline_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) + for i in results) +def test_list_pipeline_jobs_pages(transport_name: str = "grpc"): + client = PipelineServiceClient( +
credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_pipeline_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pager(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_pipeline_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pages(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + pipeline_service.DeletePipelineJobRequest, + dict, +]) +def test_delete_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CancelPipelineJobRequest, + dict, +]) +def test_cancel_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.cancel_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PipelineServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PipelineServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = PipelineServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PipelineServiceGrpcTransport, + ) + +def test_pipeline_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_pipeline_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_training_pipeline', + 'get_training_pipeline', + 'list_training_pipelines', + 'delete_training_pipeline', + 'cancel_training_pipeline', + 'create_pipeline_job', + 'get_pipeline_job', + 'list_pipeline_jobs', + 'delete_pipeline_job', + 'cancel_pipeline_job', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_pipeline_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PipelineServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_pipeline_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PipelineServiceTransport() + adc.assert_called_once() + + +def test_pipeline_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PipelineServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, + ], +) +def test_pipeline_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PipelineServiceGrpcTransport, grpc_helpers), + (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_pipeline_service_host_no_port(transport_name):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:443'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_pipeline_service_host_with_port(transport_name):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:8000'
+    )
+
+def test_pipeline_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PipelineServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    # PEP 8 (E711): compare against None with `is`, not `==` — `==` would
+    # dispatch to the attribute's __eq__ and can yield a misleading result.
+    assert transport._ssl_channel_credentials is None
+
+
+def test_pipeline_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+ transport = transports.PipelineServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_pipeline_service_grpc_lro_client(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_pipeline_service_grpc_lro_async_client(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_artifact_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = PipelineServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_artifact_path(path) + assert expected == actual + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + actual = PipelineServiceClient.context_path(project, location, metadata_store, context) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = PipelineServiceClient.context_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_context_path(path) + assert expected == actual + +def test_custom_job_path(): + project = "oyster" + location = "nudibranch" + custom_job = "cuttlefish" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = PipelineServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "custom_job": "nautilus", + } + path = PipelineServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_endpoint_path(): + project = "scallop" + location = "abalone" + endpoint = "squid" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PipelineServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "clam", + "location": "whelk", + "endpoint": "octopus", + } + path = PipelineServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PipelineServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PipelineServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_model_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", + } + path = PipelineServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PipelineServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = PipelineServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = PipelineServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = PipelineServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PipelineServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = PipelineServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = PipelineServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = PipelineServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PipelineServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = PipelineServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PipelineServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_delete_operation` — the async variant
+    # redefined the sync test's name, so pytest only collected this one and
+    # the sync test above never ran. Default transport is also corrected to
+    # "grpc_asyncio" to match the async client.
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_operation_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_cancel_operation` — redefining the sync
+    # test's name meant pytest silently dropped the sync test. Default
+    # transport corrected to "grpc_asyncio" for the async client.
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py new file mode 100644 index 0000000000..71adbd96f5 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -0,0 +1,3302 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api import httpbody_pb2 # type: ignore +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceClient +from google.cloud.aiplatform_v1.services.prediction_service import transports +from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), +]) +def test_prediction_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): + 
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), +]) +def test_prediction_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + 
(PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, PredictionServiceAsyncClient +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + model='model_value', + model_display_name='model_display_name_value', + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + assert response.model == 'model_value' + assert response.model_display_name == 'model_display_name_value' + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + client.predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + +@pytest.mark.asyncio +async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + model='model_value', + model_display_name='model_display_name_value', + )) + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + assert response.model == 'model_value' + assert response.model_display_name == 'model_display_name_value' + + +@pytest.mark.asyncio +async def test_predict_async_from_dict(): + await test_predict_async(request_type=dict) + + +def test_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = prediction_service.PredictResponse() + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + + +def test_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.predict( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.RawPredictRequest, + dict, +]) +def test_raw_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody( + content_type='content_type_value', + data=b'data_blob', + ) + response = client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == 'content_type_value' + assert response.data == b'data_blob' + + +def test_raw_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + client.raw_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + +@pytest.mark.asyncio +async def test_raw_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.RawPredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody( + content_type='content_type_value', + data=b'data_blob', + )) + response = await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == 'content_type_value' + assert response.data == b'data_blob' + + +@pytest.mark.asyncio +async def test_raw_predict_async_from_dict(): + await test_raw_predict_async(request_type=dict) + + +def test_raw_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.RawPredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + call.return_value = httpbody_pb2.HttpBody() + client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_raw_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = prediction_service.RawPredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) + await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_raw_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.raw_predict( + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') + assert arg == mock_val + + +def test_raw_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + +@pytest.mark.asyncio +async def test_raw_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.raw_predict( + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_raw_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.ExplainRequest, + dict, +]) +def test_explain(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.ExplainResponse( + deployed_model_id='deployed_model_id_value', + ) + response = client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.ExplainResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +def test_explain_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + client.explain() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + +@pytest.mark.asyncio +async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse( + deployed_model_id='deployed_model_id_value', + )) + response = await client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.ExplainResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +@pytest.mark.asyncio +async def test_explain_async_from_dict(): + await test_explain_async(request_type=dict) + + +def test_explain_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = prediction_service.ExplainResponse() + client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_explain_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) + await client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_explain_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.ExplainResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.explain( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + + +def test_explain_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.explain( + prediction_service.ExplainRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + +@pytest.mark.asyncio +async def test_explain_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = prediction_service.ExplainResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.explain( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_explain_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.explain( + prediction_service.ExplainRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'predict', + 'raw_predict', + 'explain', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + 
Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_prediction_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport() + adc.assert_called_once() + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +def test_prediction_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PredictionServiceGrpcTransport, grpc_helpers), + (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_prediction_service_host_no_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_prediction_service_host_with_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_prediction_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PredictionServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_prediction_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PredictionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = 
"projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PredictionServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = PredictionServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is 
concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations(transport: str = "grpc"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = PredictionServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = PredictionServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = PredictionServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py new file mode 100644 index 0000000000..2b03933e57 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -0,0 +1,3925 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.services.specialist_pool_service import transports +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", 
b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + 
+@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_specialist_pool_service_client_get_transport_class(): + transport = SpecialistPoolServiceClient.get_transport_class() + available_transports = [ + transports.SpecialistPoolServiceGrpcTransport, + ] + assert transport in 
available_transports + + transport = SpecialistPoolServiceClient.get_transport_class("grpc") + assert transport == transports.SpecialistPoolServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", grpc_helpers), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_specialist_pool_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = SpecialistPoolServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", grpc_helpers), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_specialist_pool_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.CreateSpecialistPoolRequest, + dict, +]) +def test_create_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + client.create_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async_from_dict(): + await test_create_specialist_pool_async(request_type=dict) + + +def test_create_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + + +def test_create_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.GetSpecialistPoolRequest, + dict, +]) +def test_get_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + specialist_worker_emails=['specialist_worker_emails_value'], + ) + response = client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + assert response.specialist_worker_emails == ['specialist_worker_emails_value'] + + +def test_get_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + client.get_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + specialist_worker_emails=['specialist_worker_emails_value'], + )) + response = await client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + assert response.specialist_worker_emails == ['specialist_worker_emails_value'] + + +@pytest.mark.asyncio +async def test_get_specialist_pool_async_from_dict(): + await test_get_specialist_pool_async(request_type=dict) + + +def test_get_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.GetSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + call.return_value = specialist_pool.SpecialistPool() + client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.GetSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) + await client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.ListSpecialistPoolsRequest, + dict, +]) +def test_list_specialist_pools(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSpecialistPoolsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_specialist_pools_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + client.list_specialist_pools() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + +@pytest.mark.asyncio +async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_from_dict(): + await test_list_specialist_pools_async(request_type=dict) + + +def test_list_specialist_pools_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = specialist_pool_service.ListSpecialistPoolsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_specialist_pools_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.ListSpecialistPoolsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + await client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_specialist_pools_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_specialist_pools_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_specialist_pools( + specialist_pool_service.ListSpecialistPoolsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_specialist_pools_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_specialist_pools_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_specialist_pools( + specialist_pool_service.ListSpecialistPoolsRequest(), + parent='parent_value', + ) + + +def test_list_specialist_pools_pager(transport_name: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_specialist_pools(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, specialist_pool.SpecialistPool) + for 
i in results) +def test_list_specialist_pools_pages(transport_name: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + pages = list(client.list_specialist_pools(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_pager(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_specialist_pools(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, specialist_pool.SpecialistPool) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_pages(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_specialist_pools(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.DeleteSpecialistPoolRequest, + dict, +]) +def test_delete_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + client.delete_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async_from_dict(): + await test_delete_specialist_pool_async(request_type=dict) + + +def test_delete_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.UpdateSpecialistPoolRequest, + dict, +]) +def test_update_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + client.update_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async_from_dict(): + await test_update_specialist_pool_async(request_type=dict) + + +def test_update_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = specialist_pool_service.UpdateSpecialistPoolRequest() + + request.specialist_pool.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'specialist_pool.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.UpdateSpecialistPoolRequest() + + request.specialist_pool.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'specialist_pool.name=name_value', + ) in kw['metadata'] + + +def test_update_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_specialist_pool( + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_specialist_pool( + specialist_pool_service.UpdateSpecialistPoolRequest(), + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_specialist_pool( + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_specialist_pool( + specialist_pool_service.UpdateSpecialistPoolRequest(), + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SpecialistPoolServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = SpecialistPoolServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.SpecialistPoolServiceGrpcTransport, + ) + +def test_specialist_pool_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_specialist_pool_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_specialist_pool', + 'get_specialist_pool', + 'list_specialist_pools', + 'delete_specialist_pool', + 'update_specialist_pool', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_specialist_pool_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_specialist_pool_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport() + adc.assert_called_once() + + +def test_specialist_pool_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +def test_specialist_pool_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_specialist_pool_service_host_no_port(transport_name): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_specialist_pool_service_host_with_port(transport_name): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_specialist_pool_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.SpecialistPoolServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_specialist_pool_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) 
are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_specialist_pool_service_grpc_lro_client(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_service_grpc_lro_async_client(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_path(): + project = "squid" + location = "clam" + specialist_pool = "whelk" + expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) + assert expected == actual + + +def test_parse_specialist_pool_path(): + expected = { + "project": "octopus", + "location": "oyster", + "specialist_pool": "nudibranch", + } + path = SpecialistPoolServiceClient.specialist_pool_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = SpecialistPoolServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = SpecialistPoolServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = SpecialistPoolServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SpecialistPoolServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = SpecialistPoolServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = SpecialistPoolServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = SpecialistPoolServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpecialistPoolServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SpecialistPoolServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = SpecialistPoolServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = SpecialistPoolServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional 
in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = None
+
+        response = client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc"):
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+def test_cancel_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = None
+
+ client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.CancelOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_cancel_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_cancel_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.cancel_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+def test_wait_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_wait_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_wait_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_wait_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.GetOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ await client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_get_operation_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+
+ response = client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_operation_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.get_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_list_operations_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+
+ response = client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_list_locations_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ response = client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+ response = client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.GetLocationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.Location)
+
+def test_get_location_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials())
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = locations_pb2.Location()
+
+ client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ )
+ response = await client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+def test_set_iam_policy_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+def test_set_iam_policy_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+
+ response = client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy()
+ )
+
+ response = await client.set_iam_policy(
+ request={
+ "resource": "resource_value",
+ "policy": policy_pb2.Policy(version=774),
+ }
+ )
+ call.assert_called()
+
+def test_get_iam_policy(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy), "__call__"
+ ) as call:
+ # Return an awaitable fake call so the async client can await the RPC.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py new file mode 100644 index 0000000000..451c562c31 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py @@ -0,0 +1,10369 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.tensorboard_service import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1.services.tensorboard_service import TensorboardServiceClient +from google.cloud.aiplatform_v1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1.services.tensorboard_service import transports +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import tensorboard +from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1.types import tensorboard_data +from google.cloud.aiplatform_v1.types import tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1.types import tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1.types import tensorboard_service +from google.cloud.aiplatform_v1.types import 
tensorboard_time_series +from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None + assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (TensorboardServiceClient, "grpc"), + 
(TensorboardServiceAsyncClient, "grpc_asyncio"), +]) +def test_tensorboard_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TensorboardServiceGrpcTransport, "grpc"), + (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), +]) +def test_tensorboard_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert 
client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_tensorboard_service_client_get_transport_class(): + transport = TensorboardServiceClient.get_transport_class() + available_transports = [ + transports.TensorboardServiceGrpcTransport, + ] + assert transport in available_transports + + transport = TensorboardServiceClient.get_transport_class("grpc") + assert transport == transports.TensorboardServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "false"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + TensorboardServiceClient, TensorboardServiceAsyncClient +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +def test_tensorboard_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", grpc_helpers), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_tensorboard_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = TensorboardServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", grpc_helpers), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_tensorboard_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardRequest, + dict, +]) +def test_create_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out 
the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + client.create_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_tensorboard_async_from_dict(): + await test_create_tensorboard_async(request_type=dict) + + +def test_create_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard( + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + + +def test_create_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard( + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardRequest, + dict, +]) +def test_get_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard( + name='name_value', + display_name='display_name_value', + description='description_value', + blob_storage_path_prefix='blob_storage_path_prefix_value', + run_count=989, + etag='etag_value', + ) + response = client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard.Tensorboard) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' + assert response.run_count == 989 + assert response.etag == 'etag_value' + + +def test_get_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + client.get_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard( + name='name_value', + display_name='display_name_value', + description='description_value', + blob_storage_path_prefix='blob_storage_path_prefix_value', + run_count=989, + etag='etag_value', + )) + response = await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard.Tensorboard) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' + assert response.run_count == 989 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_async_from_dict(): + await test_get_tensorboard_async(request_type=dict) + + +def test_get_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + call.return_value = tensorboard.Tensorboard() + client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) + await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardRequest, + dict, +]) +def test_update_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + client.update_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_tensorboard_async_from_dict(): + await test_update_tensorboard_async(request_type=dict) + + +def test_update_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardsRequest, + dict, +]) +def test_list_tensorboards(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_service.ListTensorboardsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboards_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + client.list_tensorboards() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_from_dict(): + await test_list_tensorboards_async(request_type=dict) + + +def test_list_tensorboards_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardsResponse() + client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboards_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.ListTensorboardsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) + await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboards_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboards( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboards_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_tensorboards(
+ tensorboard_service.ListTensorboardsRequest(),
+ parent='parent_value',
+ )
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardsResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_tensorboards(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_tensorboards(
+ tensorboard_service.ListTensorboardsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_tensorboards_pager(transport_name: str = "grpc"):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_tensorboards(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, tensorboard.Tensorboard)
+ for i in results)
+def test_list_tensorboards_pages(transport_name: str = "grpc"):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_tensorboards(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async_pager():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_tensorboards(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tensorboard.Tensorboard)
+ for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async_pages():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboards(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardRequest, + dict, +]) +def test_delete_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + client.delete_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async_from_dict(): + await test_delete_tensorboard_async(request_type=dict) + + +def test_delete_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardExperimentRequest, + dict, +]) +def test_create_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_create_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + client.create_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async_from_dict(): + await test_create_tensorboard_experiment_async(request_type=dict) + + +def test_create_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_experiment( + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_experiment_id + mock_val = 'tensorboard_experiment_id_value' + assert arg == mock_val + + +def test_create_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_experiment( + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_experiment_id + mock_val = 'tensorboard_experiment_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardExperimentRequest, + dict, +]) +def test_get_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_get_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + client.get_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async_from_dict(): + await test_get_tensorboard_experiment_async(request_type=dict) + + +def test_get_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + call.return_value = tensorboard_experiment.TensorboardExperiment() + client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) + await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardExperimentRequest, + dict, +]) +def test_update_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_update_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + client.update_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async_from_dict(): + await test_update_tensorboard_experiment_async(request_type=dict) + + +def test_update_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardExperimentRequest() + + request.tensorboard_experiment.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.UpdateTensorboardExperimentRequest() + + request.tensorboard_experiment.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardExperimentsRequest, + dict, +]) +def test_list_tensorboard_experiments(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardExperimentsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_experiments_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + client.list_tensorboard_experiments() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_from_dict(): + await test_list_tensorboard_experiments_async(request_type=dict) + + +def test_list_tensorboard_experiments_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) + await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_experiments_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_experiments( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_experiments_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_tensorboard_experiments( + tensorboard_service.ListTensorboardExperimentsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_experiments( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboard_experiments( + tensorboard_service.ListTensorboardExperimentsRequest(), + parent='parent_value', + ) + + +def test_list_tensorboard_experiments_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_tensorboard_experiments(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) + for i in results) +def test_list_tensorboard_experiments_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboard_experiments(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboard_experiments(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_experiments(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardExperimentRequest, + dict, +]) +def test_delete_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + client.delete_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async_from_dict(): + await test_delete_tensorboard_experiment_async(request_type=dict) + + +def test_delete_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardRunRequest, + dict, +]) +def test_create_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_create_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + client.create_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_async_from_dict(): + await test_create_tensorboard_run_async(request_type=dict) + + +def test_create_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + await client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_run( + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].tensorboard_run
+        mock_val = gca_tensorboard_run.TensorboardRun(name='name_value')
+        assert arg == mock_val
+        arg = args[0].tensorboard_run_id
+        mock_val = 'tensorboard_run_id_value'
+        assert arg == mock_val
+
+
+def test_create_tensorboard_run_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_tensorboard_run(
+            tensorboard_service.CreateTensorboardRunRequest(),
+            parent='parent_value',
+            tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+            tensorboard_run_id='tensorboard_run_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_tensorboard_run),
+            '__call__') as call:
+        # Designate an appropriate return value for the call. The async
+        # transport awaits a grpc.aio call object, so the response must be
+        # wrapped in FakeUnaryUnaryCall (a bare message here is dead code).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_tensorboard_run(
+            parent='parent_value',
+            tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+            tensorboard_run_id='tensorboard_run_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_run_id + mock_val = 'tensorboard_run_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchCreateTensorboardRunsRequest, + dict, +]) +def test_batch_create_tensorboard_runs(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse( + ) + response = client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) + + +def test_batch_create_tensorboard_runs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + client.batch_create_tensorboard_runs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse( + )) + response = await client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_async_from_dict(): + await test_batch_create_tensorboard_runs_async(request_type=dict) + + +def test_batch_create_tensorboard_runs_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) + await client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_tensorboard_runs_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_tensorboard_runs( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.batch_create_tensorboard_runs(
+            tensorboard_service.BatchCreateTensorboardRunsRequest(),
+            parent='parent_value',
+            requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')],
+        )
+
+@pytest.mark.asyncio
+async def test_batch_create_tensorboard_runs_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_create_tensorboard_runs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call. The async
+        # transport awaits a grpc.aio call object, so the response must be
+        # wrapped in FakeUnaryUnaryCall (a bare message here is dead code).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.batch_create_tensorboard_runs(
+            parent='parent_value',
+            requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].requests
+        mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')]
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_batch_create_tensorboard_runs_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.batch_create_tensorboard_runs( + tensorboard_service.BatchCreateTensorboardRunsRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardRunRequest, + dict, +]) +def test_get_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + client.get_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async_from_dict(): + await test_get_tensorboard_run_async(request_type=dict) + + +def test_get_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + call.return_value = tensorboard_run.TensorboardRun() + client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) + await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_tensorboard_run(
+            tensorboard_service.GetTensorboardRunRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_tensorboard_run),
+            '__call__') as call:
+        # Designate an appropriate return value for the call. The async
+        # transport awaits a grpc.aio call object, so the response must be
+        # wrapped in FakeUnaryUnaryCall (a bare message here is dead code).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_tensorboard_run(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_tensorboard_run(
+            tensorboard_service.GetTensorboardRunRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    tensorboard_service.UpdateTensorboardRunRequest,
+    dict,
+])
+def test_update_tensorboard_run(request_type, transport: str = 'grpc'):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_update_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + client.update_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async_from_dict(): + await test_update_tensorboard_run_async(request_type=dict) + + +def test_update_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardRunsRequest, + dict, +]) +def test_list_tensorboard_runs(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardRunsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_runs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + client.list_tensorboard_runs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardRunsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_from_dict(): + await test_list_tensorboard_runs_async(request_type=dict) + + +def test_list_tensorboard_runs_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.ListTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) + await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_runs_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_runs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboard_runs( + tensorboard_service.ListTensorboardRunsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_runs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboard_runs( + tensorboard_service.ListTensorboardRunsRequest(), + parent='parent_value', + ) + + +def test_list_tensorboard_runs_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_tensorboard_runs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard_run.TensorboardRun) + for i in results) +def 
test_list_tensorboard_runs_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboard_runs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboard_runs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_run.TensorboardRun) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_runs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardRunRequest, + dict, +]) +def test_delete_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + client.delete_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async_from_dict(): + await test_delete_tensorboard_run_async(request_type=dict) + + +def test_delete_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, + dict, +]) +def test_batch_create_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( + ) + response = client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) + + +def test_batch_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + client.batch_create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( + )) + response = await client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async_from_dict(): + await test_batch_create_tensorboard_time_series_async(request_type=dict) + + +def test_batch_create_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) + await client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.batch_create_tensorboard_time_series( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_create_tensorboard_time_series( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardTimeSeriesRequest, + dict, +]) +def test_create_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + client.create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async_from_dict(): + await test_create_tensorboard_time_series_async(request_type=dict) + + +def test_create_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_time_series( + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + + +def test_create_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_time_series( + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardTimeSeriesRequest, + dict, +]) +def test_get_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_get_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + client.get_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async_from_dict(): + await test_get_tensorboard_time_series_async(request_type=dict) + + +def test_get_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.GetTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) + await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardTimeSeriesRequest, + dict, +]) +def test_update_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_update_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + client.update_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async_from_dict(): + await test_update_tensorboard_time_series_async(request_type=dict) + + +def test_update_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardTimeSeriesRequest, + dict, +]) +def test_list_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardTimeSeriesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + client.list_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_from_dict(): + await test_list_tensorboard_time_series_async(request_type=dict) + + +def test_list_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) + await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_time_series( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_time_series( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent='parent_value', + ) + + +def test_list_tensorboard_time_series_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_tensorboard_time_series(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) + for i in results) +def test_list_tensorboard_time_series_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboard_time_series(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboard_time_series(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_time_series(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardTimeSeriesRequest, + dict, +]) +def test_delete_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + client.delete_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async_from_dict(): + await test_delete_tensorboard_time_series_async(request_type=dict) + + +def test_delete_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_time_series( + tensorboard_service.DeleteTensorboardTimeSeriesRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_tensorboard_time_series( + tensorboard_service.DeleteTensorboardTimeSeriesRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, + dict, +]) +def test_batch_read_tensorboard_time_series_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( + ) + response = client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) + + +def test_batch_read_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + client.batch_read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( + )) + response = await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async_from_dict(): + await test_batch_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_batch_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = 'tensorboard_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard=tensorboard_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = 'tensorboard_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) + await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard=tensorboard_value', + ) in kw['metadata'] + + +def test_batch_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_tensorboard_time_series_data( + tensorboard='tensorboard_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = 'tensorboard_value' + assert arg == mock_val + + +def test_batch_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard='tensorboard_value', + ) + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_tensorboard_time_series_data( + tensorboard='tensorboard_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = 'tensorboard_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard='tensorboard_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ReadTensorboardTimeSeriesDataRequest, + dict, +]) +def test_read_tensorboard_time_series_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse( + ) + response = client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) + + +def test_read_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + client.read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse( + )) + response = await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async_from_dict(): + await test_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) + await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +def test_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + + +def test_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ReadTensorboardBlobDataRequest, + dict, +]) +def test_read_tensorboard_blob_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + response = client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +def test_read_tensorboard_blob_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + client.read_tensorboard_blob_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) + response = await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async_from_dict(): + await test_read_tensorboard_blob_data_async(request_type=dict) + + +def test_read_tensorboard_blob_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + + request.time_series = 'time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'time_series=time_series_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + + request.time_series = 'time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) + await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'time_series=time_series_value', + ) in kw['metadata'] + + +def test_read_tensorboard_blob_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.read_tensorboard_blob_data( + time_series='time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].time_series + mock_val = 'time_series_value' + assert arg == mock_val + + +def test_read_tensorboard_blob_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_tensorboard_blob_data( + tensorboard_service.ReadTensorboardBlobDataRequest(), + time_series='time_series_value', + ) + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_tensorboard_blob_data( + time_series='time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].time_series + mock_val = 'time_series_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_tensorboard_blob_data( + tensorboard_service.ReadTensorboardBlobDataRequest(), + time_series='time_series_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.WriteTensorboardExperimentDataRequest, + dict, +]) +def test_write_tensorboard_experiment_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse( + ) + response = client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) + + +def test_write_tensorboard_experiment_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + client.write_tensorboard_experiment_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse( + )) + response = await client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_async_from_dict(): + await test_write_tensorboard_experiment_data_async(request_type=dict) + + +def test_write_tensorboard_experiment_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardExperimentDataRequest() + + request.tensorboard_experiment = 'tensorboard_experiment_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment=tensorboard_experiment_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardExperimentDataRequest() + + request.tensorboard_experiment = 'tensorboard_experiment_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) + await client.write_tensorboard_experiment_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment=tensorboard_experiment_value', + ) in kw['metadata'] + + +def test_write_tensorboard_experiment_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.write_tensorboard_experiment_data( + tensorboard_experiment='tensorboard_experiment_value', + write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = 'tensorboard_experiment_value' + assert arg == mock_val + arg = args[0].write_run_data_requests + mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] + assert arg == mock_val + + +def test_write_tensorboard_experiment_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.write_tensorboard_experiment_data( + tensorboard_service.WriteTensorboardExperimentDataRequest(), + tensorboard_experiment='tensorboard_experiment_value', + write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], + ) + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_experiment_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.write_tensorboard_experiment_data( + tensorboard_experiment='tensorboard_experiment_value', + write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = 'tensorboard_experiment_value' + assert arg == mock_val + arg = args[0].write_run_data_requests + mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_write_tensorboard_experiment_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.write_tensorboard_experiment_data( + tensorboard_service.WriteTensorboardExperimentDataRequest(), + tensorboard_experiment='tensorboard_experiment_value', + write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.WriteTensorboardRunDataRequest, + dict, +]) +def test_write_tensorboard_run_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse( + ) + response = client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) + + +def test_write_tensorboard_run_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + client.write_tensorboard_run_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse( + )) + response = await client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_async_from_dict(): + await test_write_tensorboard_run_data_async(request_type=dict) + + +def test_write_tensorboard_run_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardRunDataRequest() + + request.tensorboard_run = 'tensorboard_run_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run=tensorboard_run_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = tensorboard_service.WriteTensorboardRunDataRequest() + + request.tensorboard_run = 'tensorboard_run_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) + await client.write_tensorboard_run_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run=tensorboard_run_value', + ) in kw['metadata'] + + +def test_write_tensorboard_run_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.write_tensorboard_run_data( + tensorboard_run='tensorboard_run_value', + time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = 'tensorboard_run_value' + assert arg == mock_val + arg = args[0].time_series_data + mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] + assert arg == mock_val + + +def test_write_tensorboard_run_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.write_tensorboard_run_data( + tensorboard_service.WriteTensorboardRunDataRequest(), + tensorboard_run='tensorboard_run_value', + time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], + ) + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_tensorboard_run_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.write_tensorboard_run_data( + tensorboard_run='tensorboard_run_value', + time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = 'tensorboard_run_value' + assert arg == mock_val + arg = args[0].time_series_data + mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_write_tensorboard_run_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.write_tensorboard_run_data( + tensorboard_service.WriteTensorboardRunDataRequest(), + tensorboard_run='tensorboard_run_value', + time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + dict, +]) +def test_export_tensorboard_time_series_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + next_page_token='next_page_token_value', + ) + response = client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_export_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + client.export_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + next_page_token='next_page_token_value', + )) + response = await client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_from_dict(): + await test_export_tensorboard_time_series_data_async(request_type=dict) + + +def test_export_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) + await client.export_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +def test_export_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.export_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + + +def test_export_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_tensorboard_time_series_data( + tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_tensorboard_time_series_data( + tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + + +def test_export_tensorboard_time_series_data_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='abc', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], + next_page_token='def', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='ghi', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('tensorboard_time_series', ''), + )), + ) + pager = client.export_tensorboard_time_series_data(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) + for i in results) +def test_export_tensorboard_time_series_data_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='abc', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], + next_page_token='def', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='ghi', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + pages = list(client.export_tensorboard_time_series_data(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='abc', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], + next_page_token='def', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='ghi', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.export_tensorboard_time_series_data(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) + for i in responses) + + +@pytest.mark.asyncio +async def test_export_tensorboard_time_series_data_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_tensorboard_time_series_data), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='abc', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[], + next_page_token='def', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + ], + next_page_token='ghi', + ), + tensorboard_service.ExportTensorboardTimeSeriesDataResponse( + time_series_data_points=[ + tensorboard_data.TimeSeriesDataPoint(), + tensorboard_data.TimeSeriesDataPoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TensorboardServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TensorboardServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.TensorboardServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.TensorboardServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = TensorboardServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.TensorboardServiceGrpcTransport, + ) + +def test_tensorboard_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TensorboardServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_tensorboard_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TensorboardServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_tensorboard', + 'get_tensorboard', + 'update_tensorboard', + 'list_tensorboards', + 'delete_tensorboard', + 'create_tensorboard_experiment', + 'get_tensorboard_experiment', + 'update_tensorboard_experiment', + 'list_tensorboard_experiments', + 'delete_tensorboard_experiment', + 'create_tensorboard_run', + 'batch_create_tensorboard_runs', + 'get_tensorboard_run', + 'update_tensorboard_run', + 'list_tensorboard_runs', + 'delete_tensorboard_run', + 'batch_create_tensorboard_time_series', + 'create_tensorboard_time_series', + 'get_tensorboard_time_series', + 'update_tensorboard_time_series', + 'list_tensorboard_time_series', + 'delete_tensorboard_time_series', + 'batch_read_tensorboard_time_series_data', + 'read_tensorboard_time_series_data', + 'read_tensorboard_blob_data', + 'write_tensorboard_experiment_data', + 'write_tensorboard_run_data', + 'export_tensorboard_time_series_data', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_tensorboard_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, 
mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id="octopus", + ) + + +def test_tensorboard_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport() + adc.assert_called_once() + + +def test_tensorboard_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TensorboardServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_tensorboard_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.TensorboardServiceGrpcTransport, grpc_helpers), + (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_tensorboard_service_host_no_port(transport_name): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_tensorboard_service_host_with_port(transport_name): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_tensorboard_service_grpc_transport_channel(): + 
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TensorboardServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_tensorboard_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TensorboardServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + 
grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_tensorboard_service_grpc_lro_client(): + client = TensorboardServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_tensorboard_service_grpc_lro_async_client(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_tensorboard_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "octopus", + "location": "oyster", + "tensorboard": "nudibranch", + } + path = TensorboardServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_path(path) + assert expected == actual + +def test_tensorboard_experiment_path(): + project = "cuttlefish" + location = "mussel" + tensorboard = "winkle" + experiment = "nautilus" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) + actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment) + assert expected == actual + + +def test_parse_tensorboard_experiment_path(): + expected = { + "project": "scallop", + "location": "abalone", + "tensorboard": "squid", + "experiment": "clam", + } + path = TensorboardServiceClient.tensorboard_experiment_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path) + assert expected == actual + +def test_tensorboard_run_path(): + project = "whelk" + location = "octopus" + tensorboard = "oyster" + experiment = "nudibranch" + run = "cuttlefish" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) + actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run) + assert expected == actual + + +def test_parse_tensorboard_run_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + "experiment": "scallop", + "run": "abalone", + } + path = TensorboardServiceClient.tensorboard_run_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_run_path(path) + assert expected == actual + +def test_tensorboard_time_series_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + experiment = "octopus" + run = "oyster" + time_series = "nudibranch" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) + assert expected == actual + + +def test_parse_tensorboard_time_series_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "tensorboard": "winkle", + "experiment": "nautilus", + "run": "scallop", + "time_series": "abalone", + } + path = TensorboardServiceClient.tensorboard_time_series_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TensorboardServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TensorboardServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TensorboardServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TensorboardServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TensorboardServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TensorboardServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TensorboardServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TensorboardServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TensorboardServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TensorboardServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = TensorboardServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the 
 runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.DeleteOperationRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = None +        response = client.delete_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert response is None +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.DeleteOperationRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            None +        ) +        response = await client.delete_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert response is None + +def test_delete_operation_field_headers(): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), +    ) + +    # Any value that is part of the HTTP/1.1 URI should be sent as +    # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = None + +        response = client.delete_operation( +            request={ +                "name": "locations", +            } +        ) +        call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), +    ) +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            None +        ) +        response = await client.delete_operation( +            request={ +                "name": "locations", +            } +        ) +        call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.CancelOperationRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = None +        response = client.cancel_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert response is None +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = operations_pb2.Operation() +        response = client.wait_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.WaitOperationRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            operations_pb2.Operation() +        ) +        response = await client.wait_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), +    ) + +    # Any value that is part of the HTTP/1.1 URI should be sent as +    # a field header. Set these to a non-empty value. +    request = operations_pb2.WaitOperationRequest() +    request.name = "locations" + +    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.GetOperationRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.get_operation), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            operations_pb2.Operation() +        ) +        response = await client.get_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), +    ) + +    # Any value that is part of the HTTP/1.1 URI should be sent as +    # a field header. Set these to a non-empty value. +    request = operations_pb2.GetOperationRequest() +    request.name = "locations" + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.get_operation), "__call__") as call: +        call.return_value = operations_pb2.Operation() + +        client.get_operation(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            operations_pb2.Operation() +        ) +        response = await client.get_operation( +            request={ +                "name": "locations", +            } +        ) +        call.assert_called() + + +def test_list_operations(transport: str = "grpc"): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.ListOperationsRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.list_operations), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = operations_pb2.ListOperationsResponse() +        response = client.list_operations(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = operations_pb2.ListOperationsRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.list_operations), "__call__") as call: +        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = locations_pb2.ListLocationsRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.list_locations), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = locations_pb2.ListLocationsResponse() +        response = client.list_locations(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): +    client = TensorboardServiceAsyncClient( +        credentials=ga_credentials.AnonymousCredentials(), transport=transport, +    ) + +    # Everything is optional in proto3 as far as the runtime is concerned, +    # and we are mocking out the actual API, so just send an empty request. +    request = locations_pb2.ListLocationsRequest() + +    # Mock the actual call within the gRPC stub, and fake the request. +    with mock.patch.object(type(client.transport.list_locations), "__call__") as call: +        # Designate an appropriate return value for the call. +        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( +            locations_pb2.ListLocationsResponse() +        ) +        response = await client.list_locations(request) +        # Establish that the underlying gRPC stub method was called. +        assert len(call.mock_calls) == 1 +        _, args, _ = call.mock_calls[0] +        assert args[0] == request + +    # Establish that the response is the type that we expect. +    assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): +    client = TensorboardServiceClient( +        credentials=ga_credentials.AnonymousCredentials(), +    ) + +    # Any value that is part of the HTTP/1.1 URI should be sent as +    # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py new file mode 100644 index 0000000000..b7bb298181 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py @@ -0,0 +1,6176 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceAsyncClient +from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceClient +from google.cloud.aiplatform_v1.services.vizier_service import pagers +from google.cloud.aiplatform_v1.services.vizier_service import transports +from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import study as gca_study +from google.cloud.aiplatform_v1.types import vizier_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, 
then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VizierServiceClient._get_default_mtls_endpoint(None) is None + assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), +]) +def test_vizier_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.VizierServiceGrpcTransport, "grpc"), + 
(transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), +]) +def test_vizier_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_vizier_service_client_get_transport_class(): + transport = VizierServiceClient.get_transport_class() + available_transports = [ + transports.VizierServiceGrpcTransport, + ] + assert transport in available_transports + + transport = VizierServiceClient.get_transport_class("grpc") + assert transport == transports.VizierServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +def test_vizier_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + VizierServiceClient, VizierServiceAsyncClient +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +def test_vizier_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", grpc_helpers), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_vizier_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = VizierServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", grpc_helpers), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_vizier_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CreateStudyRequest, + dict, +]) +def test_create_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study( + name='name_value', + display_name='display_name_value', + state=gca_study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_create_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + client.create_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + +@pytest.mark.asyncio +async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study( + name='name_value', + display_name='display_name_value', + state=gca_study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_create_study_async_from_dict(): + await test_create_study_async(request_type=dict) + + +def test_create_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + call.return_value = gca_study.Study() + client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) + await client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_study( + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].study + mock_val = gca_study.Study(name='name_value') + assert arg == mock_val + + +def test_create_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_study( + vizier_service.CreateStudyRequest(), + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_study( + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].study + mock_val = gca_study.Study(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_study( + vizier_service.CreateStudyRequest(), + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.GetStudyRequest, + dict, +]) +def test_get_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_get_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + client.get_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + +@pytest.mark.asyncio +async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_get_study_async_from_dict(): + await test_get_study_async(request_type=dict) + + +def test_get_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vizier_service.GetStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + call.return_value = study.Study() + client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + await client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_study( + vizier_service.GetStudyRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_study( + vizier_service.GetStudyRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListStudiesRequest, + dict, +]) +def test_list_studies(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListStudiesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListStudiesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_studies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + client.list_studies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + +@pytest.mark.asyncio +async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListStudiesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_studies_async_from_dict(): + await test_list_studies_async(request_type=dict) + + +def test_list_studies_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListStudiesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + call.return_value = vizier_service.ListStudiesResponse() + client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_studies_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListStudiesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) + await client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_studies_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = vizier_service.ListStudiesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_studies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_studies_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_studies( + vizier_service.ListStudiesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_studies_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListStudiesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_studies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_studies_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_studies( + vizier_service.ListStudiesRequest(), + parent='parent_value', + ) + + +def test_list_studies_pager(transport_name: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_studies(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, study.Study) + for i in results) +def test_list_studies_pages(transport_name: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + pages = list(client.list_studies(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_studies_async_pager(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_studies(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, study.Study) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_studies_async_pages(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_studies(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + vizier_service.DeleteStudyRequest, + dict, +]) +def test_delete_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + client.delete_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + +@pytest.mark.asyncio +async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_study_async_from_dict(): + await test_delete_study_async(request_type=dict) + + +def test_delete_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = None + client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_study( + vizier_service.DeleteStudyRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_study( + vizier_service.DeleteStudyRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.LookupStudyRequest, + dict, +]) +def test_lookup_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_lookup_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + client.lookup_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + +@pytest.mark.asyncio +async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_lookup_study_async_from_dict(): + await test_lookup_study_async(request_type=dict) + + +def test_lookup_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = vizier_service.LookupStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + call.return_value = study.Study() + client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_lookup_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.LookupStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + await client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_lookup_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.lookup_study( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_lookup_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.lookup_study( + vizier_service.LookupStudyRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_lookup_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.lookup_study( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_lookup_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.lookup_study( + vizier_service.LookupStudyRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.SuggestTrialsRequest, + dict, +]) +def test_suggest_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_suggest_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + client.suggest_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + +@pytest.mark.asyncio +async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_suggest_trials_async_from_dict(): + await test_suggest_trials_async(request_type=dict) + + +def test_suggest_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_suggest_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CreateTrialRequest, + dict, +]) +def test_create_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_create_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + client.create_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + +@pytest.mark.asyncio +async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_create_trial_async_from_dict(): + await test_create_trial_async(request_type=dict) + + +def test_create_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = study.Trial() + client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_trial( + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].trial + mock_val = study.Trial(name='name_value') + assert arg == mock_val + + +def test_create_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_trial( + vizier_service.CreateTrialRequest(), + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_trial( + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].trial + mock_val = study.Trial(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_trial( + vizier_service.CreateTrialRequest(), + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.GetTrialRequest, + dict, +]) +def test_get_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_get_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + client.get_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + +@pytest.mark.asyncio +async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_get_trial_async_from_dict(): + await test_get_trial_async(request_type=dict) + + +def test_get_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + call.return_value = study.Trial() + client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_trial( + vizier_service.GetTrialRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_trial( + vizier_service.GetTrialRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListTrialsRequest, + dict, +]) +def test_list_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListTrialsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrialsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + client.list_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + +@pytest.mark.asyncio +async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTrialsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_trials_async_from_dict(): + await test_list_trials_async(request_type=dict) + + +def test_list_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + call.return_value = vizier_service.ListTrialsResponse() + client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) + await client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_trials_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListTrialsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_trials_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_trials( + vizier_service.ListTrialsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_trials_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = vizier_service.ListTrialsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_trials(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_trials_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_trials(
+            vizier_service.ListTrialsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_trials_pager(transport_name: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instance, not the class
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_trials(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, study.Trial)
+                   for i in results)
+def test_list_trials_pages(transport_name: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instance, not the class
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_trials(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_trials_async_pager():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instance, not the class
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_trials(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, study.Trial)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_trials_async_pages():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instance, not the class
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_trials(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + vizier_service.AddTrialMeasurementRequest, + dict, +]) +def test_add_trial_measurement(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_add_trial_measurement_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + client.add_trial_measurement() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + +@pytest.mark.asyncio +async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_add_trial_measurement_async_from_dict(): + await test_add_trial_measurement_async(request_type=dict) + + +def test_add_trial_measurement_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.AddTrialMeasurementRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + call.return_value = study.Trial() + client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_trial_measurement_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.AddTrialMeasurementRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CompleteTrialRequest, + dict, +]) +def test_complete_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_complete_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + client.complete_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + +@pytest.mark.asyncio +async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_complete_trial_async_from_dict(): + await test_complete_trial_async(request_type=dict) + + +def test_complete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = study.Trial() + client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_complete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.DeleteTrialRequest, + dict, +]) +def test_delete_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + client.delete_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + +@pytest.mark.asyncio +async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_delete_trial_async_from_dict(): + await test_delete_trial_async(request_type=dict) + + +def test_delete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = None + client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_trial( + vizier_service.DeleteTrialRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_trial( + vizier_service.DeleteTrialRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CheckTrialEarlyStoppingStateRequest, + dict, +]) +def test_check_trial_early_stopping_state(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_check_trial_early_stopping_state_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + client.check_trial_early_stopping_state() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async_from_dict(): + await test_check_trial_early_stopping_state_async(request_type=dict) + + +def test_check_trial_early_stopping_state_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.StopTrialRequest, + dict, +]) +def test_stop_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_stop_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + client.stop_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + +@pytest.mark.asyncio +async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_stop_trial_async_from_dict(): + await test_stop_trial_async(request_type=dict) + + +def test_stop_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.StopTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + call.return_value = study.Trial() + client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_stop_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.StopTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListOptimalTrialsRequest, + dict, +]) +def test_list_optimal_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse( + ) + response = client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, vizier_service.ListOptimalTrialsResponse) + + +def test_list_optimal_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + client.list_optimal_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + +@pytest.mark.asyncio +async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse( + )) + response = await client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, vizier_service.ListOptimalTrialsResponse) + + +@pytest.mark.asyncio +async def test_list_optimal_trials_async_from_dict(): + await test_list_optimal_trials_async(request_type=dict) + + +def test_list_optimal_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vizier_service.ListOptimalTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + call.return_value = vizier_service.ListOptimalTrialsResponse() + client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_optimal_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListOptimalTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) + await client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_optimal_trials_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_optimal_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_optimal_trials_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_optimal_trials( + vizier_service.ListOptimalTrialsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_optimal_trials_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_optimal_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_optimal_trials_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_optimal_trials( + vizier_service.ListOptimalTrialsRequest(), + parent='parent_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VizierServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.VizierServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = VizierServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.VizierServiceGrpcTransport, + ) + +def test_vizier_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VizierServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_vizier_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.VizierServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_study', + 'get_study', + 'list_studies', + 'delete_study', + 'lookup_study', + 'suggest_trials', + 'create_trial', + 'get_trial', + 'list_trials', + 'add_trial_measurement', + 'complete_trial', + 'delete_trial', + 'check_trial_early_stopping_state', + 'stop_trial', + 'list_optimal_trials', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() 
+ + +def test_vizier_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VizierServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_vizier_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VizierServiceTransport() + adc.assert_called_once() + + +def test_vizier_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VizierServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, + ], +) +def test_vizier_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.VizierServiceGrpcTransport, grpc_helpers), + (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_vizier_service_host_no_port(transport_name): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_vizier_service_host_with_port(transport_name): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_vizier_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VizierServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_vizier_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.VizierServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_vizier_service_grpc_lro_client(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_vizier_service_grpc_lro_async_client(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_custom_job_path(): + project = "squid" + location = "clam" + custom_job = "whelk" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = VizierServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "custom_job": "nudibranch", + } + path = VizierServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_study_path(): + project = "cuttlefish" + location = "mussel" + study = "winkle" + expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) + actual = VizierServiceClient.study_path(project, location, study) + assert expected == actual + + +def test_parse_study_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "study": "abalone", + } + path = VizierServiceClient.study_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_study_path(path) + assert expected == actual + +def test_trial_path(): + project = "squid" + location = "clam" + study = "whelk" + trial = "octopus" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = VizierServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "study": "cuttlefish", + "trial": "mussel", + } + path = VizierServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = VizierServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = VizierServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = VizierServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = VizierServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = VizierServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = VizierServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = VizierServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = VizierServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = VizierServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = VizierServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = VizierServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+def test_delete_operation_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ call.return_value = None
+
+ client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+    # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc new file mode 100644 index 0000000000..a328166917 --- /dev/null +++ b/owl-bot-staging/v1beta1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. 
+ except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/.flake8 b/owl-bot-staging/v1beta1/.flake8 new file mode 100644 index 0000000000..29227d4cf4 --- /dev/null +++ b/owl-bot-staging/v1beta1/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in new file mode 100644 index 0000000000..e386e05fec --- /dev/null +++ b/owl-bot-staging/v1beta1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition *.py +recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst new file mode 100644 index 0000000000..c0e4d26d4d --- /dev/null +++ b/owl-bot-staging/v1beta1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. 
`Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv + source /bin/activate + /bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv + \Scripts\activate + \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst new file mode 100644 index 0000000000..43fad30e55 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst @@ -0,0 +1,10 @@ +DatasetService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst new file mode 100644 index 0000000000..022799a059 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst @@ -0,0 +1,10 @@ +EndpointService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst new file mode 100644 index 0000000000..21013eb751 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst @@ -0,0 +1,6 @@ +FeaturestoreOnlineServingService +-------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst new file mode 100644 index 0000000000..8d2f33039e --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst @@ -0,0 +1,10 @@ +FeaturestoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst new file mode 100644 index 0000000000..65c910142e --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst @@ -0,0 +1,10 @@ +IndexEndpointService +-------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst new file mode 100644 index 0000000000..96afb58594 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst @@ -0,0 +1,10 @@ +IndexService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst new file mode 100644 index 0000000000..46b1268166 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst @@ -0,0 +1,10 @@ +JobService +---------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.job_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst new file mode 100644 index 0000000000..3c07725687 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst @@ -0,0 +1,10 @@ +MetadataService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst new file mode 100644 index 0000000000..be164d59ba --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst @@ -0,0 +1,10 @@ +MigrationService +---------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst new file mode 100644 index 0000000000..be68f796b0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst @@ -0,0 +1,10 @@ +ModelService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.model_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst new file mode 100644 index 0000000000..1180370863 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst @@ -0,0 +1,10 @@ +PipelineService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst new file mode 100644 index 0000000000..03c1150df0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst new file mode 100644 index 0000000000..490112c7d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst @@ -0,0 +1,20 @@ +Services for Google Cloud Aiplatform v1beta1 API +================================================ +.. 
toctree:: + :maxdepth: 2 + + dataset_service + endpoint_service + featurestore_online_serving_service + featurestore_service + index_endpoint_service + index_service + job_service + metadata_service + migration_service + model_service + pipeline_service + prediction_service + specialist_pool_service + tensorboard_service + vizier_service diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst new file mode 100644 index 0000000000..2f13b68844 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst @@ -0,0 +1,10 @@ +SpecialistPoolService +--------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst new file mode 100644 index 0000000000..97d94feedc --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst @@ -0,0 +1,10 @@ +TensorboardService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst new file mode 100644 index 0000000000..770675f8ea --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform v1beta1 API +============================================= + +.. 
automodule:: google.cloud.aiplatform_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst new file mode 100644 index 0000000000..8cad590f6c --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst @@ -0,0 +1,10 @@ +VizierService +------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py new file mode 100644 index 0000000000..422f0c91df --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-aiplatform-v1beta1-schema-trainingjob-definition documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition" +copyright = u"2022, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. 
+release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. 
For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1beta1 Schema Trainingjob Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. 
+# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition.tex", + u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. 
List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + u"Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", + author, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst new file mode 100644 index 0000000000..5f1ed5f2b7 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +====================================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst new file mode 100644 index 0000000000..f4fe7a5301 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +=================================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst new file mode 100644 index 0000000000..ec6c42c2ed --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + definition_v1beta1/services + definition_v1beta1/types diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst new file mode 100644 index 0000000000..941dbcca59 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +================================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..7caa088065 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst new file mode 100644 index 0000000000..b3b897a0f4 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +============================================================================== +.. 
toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst new file mode 100644 index 0000000000..722a1d8ba0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +=========================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst new file mode 100644 index 0000000000..6de5e17520 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +================================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst new file mode 100644 index 0000000000..b14182d6d7 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +=============================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py new file mode 100644 index 0000000000..c526c0c89b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py @@ -0,0 +1,963 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform_v1beta1.services.dataset_service.client import DatasetServiceClient +from google.cloud.aiplatform_v1beta1.services.dataset_service.async_client import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service.client import EndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service.async_client import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service.client import FeaturestoreServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.client import IndexEndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_service.client import IndexServiceClient +from google.cloud.aiplatform_v1beta1.services.index_service.async_client import IndexServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.job_service.client import JobServiceClient +from google.cloud.aiplatform_v1beta1.services.job_service.async_client import JobServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.metadata_service.client import MetadataServiceClient +from google.cloud.aiplatform_v1beta1.services.metadata_service.async_client import MetadataServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.migration_service.client import MigrationServiceClient +from google.cloud.aiplatform_v1beta1.services.migration_service.async_client import MigrationServiceAsyncClient +from 
google.cloud.aiplatform_v1beta1.services.model_service.client import ModelServiceClient +from google.cloud.aiplatform_v1beta1.services.model_service.async_client import ModelServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service.client import PipelineServiceClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service.async_client import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.prediction_service.client import PredictionServiceClient +from google.cloud.aiplatform_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service.client import TensorboardServiceClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.vizier_service.client import VizierServiceClient +from google.cloud.aiplatform_v1beta1.services.vizier_service.async_client import VizierServiceAsyncClient + +from google.cloud.aiplatform_v1beta1.types.accelerator_type import AcceleratorType +from google.cloud.aiplatform_v1beta1.types.annotation import Annotation +from google.cloud.aiplatform_v1beta1.types.annotation_spec import AnnotationSpec +from google.cloud.aiplatform_v1beta1.types.artifact import Artifact +from google.cloud.aiplatform_v1beta1.types.batch_prediction_job import BatchPredictionJob +from google.cloud.aiplatform_v1beta1.types.completion_stats import CompletionStats +from google.cloud.aiplatform_v1beta1.types.context import Context +from google.cloud.aiplatform_v1beta1.types.custom_job import ContainerSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob +from 
google.cloud.aiplatform_v1beta1.types.custom_job import CustomJobSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import PythonPackageSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import Scheduling +from google.cloud.aiplatform_v1beta1.types.custom_job import WorkerPoolSpec +from google.cloud.aiplatform_v1beta1.types.data_item import DataItem +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import ActiveLearningConfig +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import DataLabelingJob +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import SampleConfig +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import TrainingConfig +from google.cloud.aiplatform_v1beta1.types.dataset import Dataset +from google.cloud.aiplatform_v1beta1.types.dataset import ExportDataConfig +from google.cloud.aiplatform_v1beta1.types.dataset import ImportDataConfig +from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import DeleteDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import GetAnnotationSpecRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import GetDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import 
ListAnnotationsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import UpdateDatasetRequest +from google.cloud.aiplatform_v1beta1.types.deployed_index_ref import DeployedIndexRef +from google.cloud.aiplatform_v1beta1.types.deployed_model_ref import DeployedModelRef +from google.cloud.aiplatform_v1beta1.types.encryption_spec import EncryptionSpec +from google.cloud.aiplatform_v1beta1.types.endpoint import DeployedModel +from google.cloud.aiplatform_v1beta1.types.endpoint import Endpoint +from google.cloud.aiplatform_v1beta1.types.endpoint import PredictRequestResponseLoggingConfig +from google.cloud.aiplatform_v1beta1.types.endpoint import PrivateEndpoints +from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeleteEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelResponse +from google.cloud.aiplatform_v1beta1.types.endpoint_service import GetEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsResponse +from 
google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelResponse +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UpdateEndpointRequest +from google.cloud.aiplatform_v1beta1.types.entity_type import EntityType +from google.cloud.aiplatform_v1beta1.types.env_var import EnvVar +from google.cloud.aiplatform_v1beta1.types.event import Event +from google.cloud.aiplatform_v1beta1.types.execution import Execution +from google.cloud.aiplatform_v1beta1.types.explanation import Attribution +from google.cloud.aiplatform_v1beta1.types.explanation import BlurBaselineConfig +from google.cloud.aiplatform_v1beta1.types.explanation import Examples +from google.cloud.aiplatform_v1beta1.types.explanation import ExamplesOverride +from google.cloud.aiplatform_v1beta1.types.explanation import ExamplesRestrictionsNamespace +from google.cloud.aiplatform_v1beta1.types.explanation import Explanation +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationMetadataOverride +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationParameters +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpec +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpecOverride +from google.cloud.aiplatform_v1beta1.types.explanation import FeatureNoiseSigma +from google.cloud.aiplatform_v1beta1.types.explanation import IntegratedGradientsAttribution +from google.cloud.aiplatform_v1beta1.types.explanation import ModelExplanation +from google.cloud.aiplatform_v1beta1.types.explanation import Neighbor +from google.cloud.aiplatform_v1beta1.types.explanation import Presets +from google.cloud.aiplatform_v1beta1.types.explanation import SampledShapleyAttribution +from google.cloud.aiplatform_v1beta1.types.explanation 
import SmoothGradConfig +from google.cloud.aiplatform_v1beta1.types.explanation import XraiAttribution +from google.cloud.aiplatform_v1beta1.types.explanation_metadata import ExplanationMetadata +from google.cloud.aiplatform_v1beta1.types.feature import Feature +from google.cloud.aiplatform_v1beta1.types.feature_monitoring_stats import FeatureStatsAnomaly +from google.cloud.aiplatform_v1beta1.types.feature_selector import FeatureSelector +from google.cloud.aiplatform_v1beta1.types.feature_selector import IdMatcher +from google.cloud.aiplatform_v1beta1.types.featurestore import Featurestore +from google.cloud.aiplatform_v1beta1.types.featurestore_monitoring import FeaturestoreMonitoringConfig +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValue +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValueList +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import StreamingReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeOperationMetadata +from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DestinationFeatureSetting +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import FeatureValueDestination +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesRequest +from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job import HyperparameterTuningJob +from google.cloud.aiplatform_v1beta1.types.index import Index +from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndex +from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndexAuthConfig +from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexEndpoint +from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexPrivateEndpoints +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeleteIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexOperationMetadata +from 
google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import GetIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UpdateIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import DeleteIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import GetIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesRequest +from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesResponse +from google.cloud.aiplatform_v1beta1.types.index_service import NearestNeighborSearchOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexRequest +from 
google.cloud.aiplatform_v1beta1.types.io import AvroSource +from google.cloud.aiplatform_v1beta1.types.io import BigQueryDestination +from google.cloud.aiplatform_v1beta1.types.io import BigQuerySource +from google.cloud.aiplatform_v1beta1.types.io import ContainerRegistryDestination +from google.cloud.aiplatform_v1beta1.types.io import CsvDestination +from google.cloud.aiplatform_v1beta1.types.io import CsvSource +from google.cloud.aiplatform_v1beta1.types.io import GcsDestination +from google.cloud.aiplatform_v1beta1.types.io import GcsSource +from google.cloud.aiplatform_v1beta1.types.io import TFRecordDestination +from google.cloud.aiplatform_v1beta1.types.job_service import CancelBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CancelCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CancelDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CancelHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteModelDeploymentMonitoringJobRequest +from 
google.cloud.aiplatform_v1beta1.types.job_service import GetBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import PauseModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ResumeModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from 
google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_state import JobState +from google.cloud.aiplatform_v1beta1.types.lineage_subgraph import LineageSubgraph +from google.cloud.aiplatform_v1beta1.types.machine_resources import AutomaticResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import AutoscalingMetricSpec +from google.cloud.aiplatform_v1beta1.types.machine_resources import BatchDedicatedResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import DedicatedResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec +from google.cloud.aiplatform_v1beta1.types.machine_resources import MachineSpec +from google.cloud.aiplatform_v1beta1.types.machine_resources import NfsMount +from google.cloud.aiplatform_v1beta1.types.machine_resources import ResourcesConsumed +from google.cloud.aiplatform_v1beta1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from google.cloud.aiplatform_v1beta1.types.metadata_schema import MetadataSchema +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateExecutionRequest +from 
google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataSchemaRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataSchemaRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service 
import ListMetadataStoresRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeArtifactsMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeArtifactsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeArtifactsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryArtifactLineageSubgraphRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryContextLineageSubgraphRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryExecutionInputsAndOutputsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_store import MetadataStore +from google.cloud.aiplatform_v1beta1.types.migratable_resource import MigratableResource +from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import 
BatchMigrateResourcesResponse +from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceResponse +from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesResponse +from google.cloud.aiplatform_v1beta1.types.model import Model +from google.cloud.aiplatform_v1beta1.types.model import ModelContainerSpec +from google.cloud.aiplatform_v1beta1.types.model import Port +from google.cloud.aiplatform_v1beta1.types.model import PredictSchemata +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from google.cloud.aiplatform_v1beta1.types.model_evaluation import ModelEvaluation +from google.cloud.aiplatform_v1beta1.types.model_evaluation_slice import ModelEvaluationSlice +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringAlertConfig +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringObjectiveConfig +from google.cloud.aiplatform_v1beta1.types.model_monitoring import SamplingStrategy +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ThresholdConfig +from 
google.cloud.aiplatform_v1beta1.types.model_service import DeleteModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import DeleteModelVersionRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelResponse +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationRequest +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationSliceRequest +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ImportModelEvaluationRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesResponse +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsResponse +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsResponse +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelVersionsRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelVersionsResponse +from google.cloud.aiplatform_v1beta1.types.model_service import MergeVersionAliasesRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UpdateExplanationDatasetOperationMetadata +from google.cloud.aiplatform_v1beta1.types.model_service import UpdateExplanationDatasetRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UpdateExplanationDatasetResponse +from google.cloud.aiplatform_v1beta1.types.model_service import 
UpdateModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelResponse +from google.cloud.aiplatform_v1beta1.types.operation import DeleteOperationMetadata +from google.cloud.aiplatform_v1beta1.types.operation import GenericOperationMetadata +from google.cloud.aiplatform_v1beta1.types.pipeline_failure_policy import PipelineFailurePolicy +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJob +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJobDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskExecutorDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTemplateMetadata +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelPipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreatePipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreateTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeletePipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeleteTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetPipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsResponse +from google.cloud.aiplatform_v1beta1.types.pipeline_service import 
ListTrainingPipelinesRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesResponse +from google.cloud.aiplatform_v1beta1.types.pipeline_state import PipelineState +from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainRequest +from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainResponse +from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictRequest +from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictResponse +from google.cloud.aiplatform_v1beta1.types.prediction_service import RawPredictRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool import SpecialistPool +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import DeleteSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import GetSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsResponse +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.study import Measurement +from google.cloud.aiplatform_v1beta1.types.study import Study +from google.cloud.aiplatform_v1beta1.types.study import StudySpec +from google.cloud.aiplatform_v1beta1.types.study import Trial +from google.cloud.aiplatform_v1beta1.types.tensorboard import Tensorboard +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import Scalar +from 
google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlob +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlobSequence +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardTensor +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesData +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesDataPoint +from google.cloud.aiplatform_v1beta1.types.tensorboard_experiment import TensorboardExperiment +from google.cloud.aiplatform_v1beta1.types.tensorboard_run import TensorboardRun +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardRunsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardRunsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardOperationMetadata +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRequest +from 
google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service 
import ReadTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardOperationMetadata +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardExperimentDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardExperimentDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_time_series import TensorboardTimeSeries +from google.cloud.aiplatform_v1beta1.types.training_pipeline import FilterSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import FractionSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import InputDataConfig +from google.cloud.aiplatform_v1beta1.types.training_pipeline import PredefinedSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import StratifiedSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import TimestampSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import TrainingPipeline +from google.cloud.aiplatform_v1beta1.types.types import BoolArray +from google.cloud.aiplatform_v1beta1.types.types import DoubleArray +from google.cloud.aiplatform_v1beta1.types.types import Int64Array +from google.cloud.aiplatform_v1beta1.types.types import StringArray +from 
google.cloud.aiplatform_v1beta1.types.unmanaged_container_model import UnmanagedContainerModel +from google.cloud.aiplatform_v1beta1.types.user_action_reference import UserActionReference +from google.cloud.aiplatform_v1beta1.types.value import Value +from google.cloud.aiplatform_v1beta1.types.vizier_service import AddTrialMeasurementRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import CompleteTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import GetStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import GetTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import LookupStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import StopTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service 
import SuggestTrialsMetadata +from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsResponse + +__all__ = ('DatasetServiceClient', + 'DatasetServiceAsyncClient', + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceClient', + 'IndexServiceAsyncClient', + 'JobServiceClient', + 'JobServiceAsyncClient', + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', + 'ModelServiceClient', + 'ModelServiceAsyncClient', + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceClient', + 'VizierServiceAsyncClient', + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 
'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'PredictRequestResponseLoggingConfig', + 'PrivateEndpoints', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'BlurBaselineConfig', + 'Examples', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + 'Explanation', + 'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 'IntegratedGradientsAttribution', + 'ModelExplanation', + 'Neighbor', + 'Presets', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 
'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'MutateDeployedIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 
'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'NfsMount', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteArtifactRequest', + 'DeleteContextRequest', + 'DeleteExecutionRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 
'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'PurgeArtifactsMetadata', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeContextsMetadata', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeExecutionsMetadata', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ImportModelEvaluationRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 
'ListModelVersionsResponse', + 'MergeVersionAliasesRequest', + 'UpdateExplanationDatasetOperationMetadata', + 'UpdateExplanationDatasetRequest', + 'UpdateExplanationDatasetResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineFailurePolicy', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'PipelineTemplateMetadata', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 'RawPredictRequest', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 
'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 'UpdateTensorboardTimeSeriesRequest', + 'WriteTensorboardExperimentDataRequest', + 'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'StratifiedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UnmanagedContainerModel', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 
'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..9c9d054688 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..4d6b2e70eb --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..38379e8208 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..46c9c24a2b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..72ada19675 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..ee39a33cd4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..4d41021c43 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..bc7c74d9fd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..b002f74840 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will by mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. Vertex AI will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..95d91ba989 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..0ca974bb4e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. 
+ Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..fa29339130 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..45237bb589 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..f2ece5351c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py new file mode 100644 index 0000000000..0a16e52cea --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6b925dd9dc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..20e59dc95c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..4a5bd2b13f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..9bd071dc86 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..b89d94007e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. + + Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..ae9aa725ca --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..b1c9a82d98 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoClassificationPredictionParams', + }, +) + + +class VideoClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Video Classification. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is + 10,000. + segment_classification (bool): + Set to true to request segment-level + classification. Vertex AI returns labels and + their confidence scores for the entire time + segment of the video that user specified in the + input instance. Default value is true + shot_classification (bool): + Set to true to request shot-level + classification. Vertex AI determines the + boundaries for each camera shot in the entire + time segment of the video that user specified in + the input instance. Vertex AI then returns + labels and their confidence scores for each + detected shot, along with the start and end time + of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. + Default value is false + one_sec_interval_classification (bool): + Set to true to request classification for a + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. 
Default value is false + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + segment_classification = proto.Field( + proto.BOOL, + number=3, + ) + shot_classification = proto.Field( + proto.BOOL, + number=4, + ) + one_sec_interval_classification = proto.Field( + proto.BOOL, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..6a98332abb --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoObjectTrackingPredictionParams', + }, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. 
Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + min_bounding_box_size = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..2e7bb7906e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', 
+) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..03600cb8b6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'TimeSeriesForecastingPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..99d3dc6402 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", + "protoPackage": 
"google.cloud.aiplatform.v1beta1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..0b3729232c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py new file mode 100644 index 0000000000..c3423c5e6d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the IDs. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..726fe7437b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. 
+ confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + bboxes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=struct_pb2.ListValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..d327036a5d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageSegmentationPredictionResult', + }, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + + Attributes: + category_mask (str): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnnotationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (str): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the category in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. 
+ """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py new file mode 100644 index 0000000000..ceb78f078b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularClassificationPredictionResult', + }, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. 
+ """ + + classes = proto.RepeatedField( + proto.STRING, + number=1, + ) + scores = proto.RepeatedField( + proto.FLOAT, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py new file mode 100644 index 0000000000..8f4e950d26 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularRegressionPredictionResult', + }, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. 
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..3b6ee94c90 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. 
+ text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..613a0df5fb --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextSentimentPredictionResult', + }, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Prediction output format for Text Sentiment + + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py new file mode 100644 index 0000000000..7e2990d5a7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TimeSeriesForecastingPredictionResult', + }, +) + + +class TimeSeriesForecastingPredictionResult(proto.Message): + r"""Prediction output format for Time Series Forecasting. + + Attributes: + value (float): + The regression value. + """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..2ea5be71f9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoActionRecognitionPredictionResult', + }, +) + + +class VideoActionRecognitionPredictionResult(proto.Message): + r"""Prediction output format for Video Action Recognition. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..122b60c55c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. 
+ + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. 
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + type_ = proto.Field( + proto.STRING, + number=3, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..a55b2afde6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoObjectTrackingPredictionResult', + }, +) + + +class VideoObjectTrackingPredictionResult(proto.Message): + r"""Prediction output format for Video Object Tracking. + + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, inclusive, of the video's time + segment in which the object instance has been + detected. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. + confidence (google.protobuf.wrappers_pb2.FloatValue): + The Model's confidence in correction of this + prediction, higher value means higher + confidence. + frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]): + All of the frames of the video in which a + single object instance has been detected. The + bounding boxes in the frames identify the same + object. + """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. 
The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..7e9887ac05 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetection +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentation +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTables +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassificationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtraction +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtractionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentiment +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentimentInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecasting +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingMetadata +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognition +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassificationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTracking +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ('AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..14d161aa55 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs +from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_time_series_forecasting import AutoMlForecasting +from .types.automl_time_series_forecasting import AutoMlForecastingInputs +from .types.automl_time_series_forecasting import AutoMlForecastingMetadata +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from 
.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ( +'AutoMlForecasting', +'AutoMlForecastingInputs', +'AutoMlForecastingMetadata', +'AutoMlImageClassification', +'AutoMlImageClassificationInputs', +'AutoMlImageClassificationMetadata', +'AutoMlImageObjectDetection', +'AutoMlImageObjectDetectionInputs', +'AutoMlImageObjectDetectionMetadata', +'AutoMlImageSegmentation', +'AutoMlImageSegmentationInputs', +'AutoMlImageSegmentationMetadata', +'AutoMlTables', +'AutoMlTablesInputs', +'AutoMlTablesMetadata', +'AutoMlTextClassification', +'AutoMlTextClassificationInputs', +'AutoMlTextExtraction', +'AutoMlTextExtractionInputs', +'AutoMlTextSentiment', +'AutoMlTextSentimentInputs', +'AutoMlVideoActionRecognition', +'AutoMlVideoActionRecognitionInputs', +'AutoMlVideoClassification', +'AutoMlVideoClassificationInputs', +'AutoMlVideoObjectTracking', +'AutoMlVideoObjectTrackingInputs', +'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6de794c90a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed new file mode 100644 index 0000000000..98af260cd7 
--- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py new file mode 100644 index 0000000000..97581ea020 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_time_series_forecasting import ( + AutoMlForecasting, + AutoMlForecastingInputs, + AutoMlForecastingMetadata, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) +from .export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 
'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py new file mode 100644 index 0000000000..2cfbe321e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + }, +) + + +class AutoMlImageClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageClassificationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageClassificationMetadata', + ) + + +class AutoMlImageClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType): + + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. 
For modelType + ``cloud``\ (default), the budget must be between 8,000 and + 800,000 milli node hours, inclusive. The default value is + 192,000 which represents one day in wall time, considering 8 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, + the training budget must be between 1,000 and 100,000 milli + node hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Classification might stop training + before the entire training budget has been used. + multi_label (bool): + If false, a single-label (multi-class) Model + will be trained (i.e. assuming that for each + image just up to one annotation may be + applicable). If true, a multi-label Model will + be trained (i.e. assuming that for each image + multiple annotations may be applicable). + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + base_model_id = proto.Field( + proto.STRING, + number=2, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=3, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=4, + ) + multi_label = proto.Field( + proto.BOOL, + number=5, + ) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. 
+ successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..eb32b4912e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata): + The metadata information + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageObjectDetectionInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageObjectDetectionMetadata', + ) + + +class AutoMlImageObjectDetectionInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 20,000 and + 900,000 milli node hours, inclusive. The default value is + 216,000 which represents one day in wall time, considering 9 + nodes are used. 
For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the + training budget must be between 1,000 and 100,000 milli node + hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Object Detection might stop + training before the entire training budget has + been used. + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_LATENCY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + MOBILE_TF_VERSATILE_1 = 4 + MOBILE_TF_HIGH_ACCURACY_1 = 5 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=3, + ) + + +class AutoMlImageObjectDetectionMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. 
+ """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..8a6bc62231 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. 
+ + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageSegmentationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageSegmentationMetadata', + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. Or actaul_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. 
+ """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_ACCURACY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + base_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class AutoMlImageSegmentationMetadata(proto.Message): + r""" + + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py new file mode 100644 index 0000000000..ed96dc6792 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -0,0 +1,529 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + }, +) + + +class AutoMlTables(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Tables Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTablesInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlTablesMetadata', + ) + + +class AutoMlTablesInputs(proto.Message): + r""" + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. 
+ + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + + This field is a member of `oneof`_ ``additional_optimization_objective_config``. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. + "maximize-au-prc" - Maximize the area under + the precision-recall curve. + "maximize-precision-at-recall" - Maximize + precision for a specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. 
+ classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize + root-mean-squared error (RMSE). "minimize-mae" + - Minimize mean-absolute error (MAE). + "minimize-rmsle" - Minimize root-mean-squared + log error (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. + weight_column_name (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. 
+ additional_experiments (Sequence[str]): + Additional experiment flags for the Tables + training pipeline. + """ + + class Transformation(proto.Message): + r""" + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. 
+ repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. 
+ - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. 
+ """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + time_format = proto.Field( + proto.STRING, + number=2, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=3, + ) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values is allowed, the training + pipeline will create a boolean feature that + indicated whether the value is valid. Otherwise, + the training pipeline will discard the input row + from trainining data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions. + + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. 
+ - Empty arrays treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.AutoTransformation', + ) + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericTransformation', + ) + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalTransformation', + ) + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TimestampTransformation', + ) + text = proto.Field( + proto.MESSAGE, + number=5, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextTransformation', + ) + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', + ) + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', + ) + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextArrayTransformation', + ) + + 
optimization_objective_recall_value = proto.Field( + proto.FLOAT, + number=5, + oneof='additional_optimization_objective_config', + ) + optimization_objective_precision_value = proto.Field( + proto.FLOAT, + number=6, + oneof='additional_optimization_objective_config', + ) + prediction_type = proto.Field( + proto.STRING, + number=1, + ) + target_column = proto.Field( + proto.STRING, + number=2, + ) + transformations = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Transformation, + ) + optimization_objective = proto.Field( + proto.STRING, + number=4, + ) + train_budget_milli_node_hours = proto.Field( + proto.INT64, + number=7, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=8, + ) + weight_column_name = proto.Field( + proto.STRING, + number=9, + ) + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=10, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + additional_experiments = proto.RepeatedField( + proto.STRING, + number=11, + ) + + +class AutoMlTablesMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. 
+ """ + + train_cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py new file mode 100644 index 0000000000..fa65a0ab22 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + }, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextClassificationInputs', + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field( + proto.BOOL, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py new file mode 100644 index 0000000000..468fd84340 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + }, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextExtractionInputs', + ) + + +class AutoMlTextExtractionInputs(proto.Message): + r""" + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..410330a15e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + }, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextSentimentInputs', + ) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). + """ + + sentiment_max = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py new file mode 100644 index 0000000000..b9a2d237f9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -0,0 +1,493 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + }, +) + + +class AutoMlForecasting(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Forecasting + Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlForecastingInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlForecastingMetadata', + ) + + +class AutoMlForecastingInputs(proto.Message): + r""" + + Attributes: + target_column (str): + The name of the column that the model is to + predict. + time_series_identifier_column (str): + The name of the column that identifies the + time series. + time_column (str): + The name of the column that identifies time + order in the time series. + transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing towards. The + training process creates a model that optimizes the value of + the objective function over the validation set. 
+ + The supported optimization objectives: + + - "minimize-rmse" (default) - Minimize root-mean-squared + error (RMSE). + + - "minimize-mae" - Minimize mean-absolute error (MAE). + + - "minimize-rmsle" - Minimize root-mean-squared log error + (RMSLE). + + - "minimize-rmspe" - Minimize root-mean-squared percentage + error (RMSPE). + + - "minimize-wape-mae" - Minimize the combination of + weighted absolute percentage error (WAPE) and + mean-absolute-error (MAE). + + - "minimize-quantile-loss" - Minimize the quantile loss at + the quantiles defined in ``quantiles``. + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + weight_column (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + time_series_attribute_columns (Sequence[str]): + Column names that should be used as attribute + columns. The value of these columns does not + vary as a function of time. For example, store + ID or item color. 
+ unavailable_at_forecast_columns (Sequence[str]): + Names of columns that are unavailable when a forecast is + requested. This column contains information for the given + entity (identified by the time_series_identifier_column) + that is unknown before the forecast For example, actual + weather on a given day. + available_at_forecast_columns (Sequence[str]): + Names of columns that are available and provided when a + forecast is requested. These columns contain information for + the given entity (identified by the + time_series_identifier_column column) that is known at + forecast. For example, predicted weather for a specific day. + data_granularity (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Granularity): + Expected difference in time granularity + between rows in the data. + forecast_horizon (int): + The amount of time into the future for which forecasted + values for the target are returned. Expressed in number of + units defined by the ``data_granularity`` field. + context_window (int): + The amount of time into the past training and prediction + data is used for model training and prediction respectively. + Expressed in number of units defined by the + ``data_granularity`` field. + export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + quantiles (Sequence[float]): + Quantiles to use for minimize-quantile-loss + ``optimization_objective``. Up to 5 quantiles are allowed of + values between 0 and 1, exclusive. Required if the value of + optimization_objective is minimize-quantile-loss. Represents + the percent quantiles to use for that objective. Quantiles + must be unique. + validation_options (str): + Validation options for the data validation component. 
The + available options are: + + - "fail-pipeline" - default, will validate against the + validation and fail the pipeline if it fails. + + - "ignore-validation" - ignore the results of the + validation and continue + additional_experiments (Sequence[str]): + Additional experiment flags for the time + series forcasting training. + """ + + class Transformation(proto.Message): + r""" + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation): + + This field is a member of `oneof`_ ``transformation_detail``. + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistic of dataset. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + + - The z_score of the value. + + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + + - Determine the year, month, day,and weekday. Treat each value from + the timestamp as a Categorical column. + + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. 
+ + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + + - ``unix-milliseconds`` + + - ``unix-microseconds`` + + - ``unix-nanoseconds`` + + (for respectively number of seconds, milliseconds, + microseconds and nanoseconds since start of the Unix epoch); + + or be written in ``strftime`` syntax. + + If time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z) + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + time_format = proto.Field( + proto.STRING, + number=2, + ) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. 
+ + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof='transformation_detail', + message='AutoMlForecastingInputs.Transformation.AutoTransformation', + ) + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof='transformation_detail', + message='AutoMlForecastingInputs.Transformation.NumericTransformation', + ) + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof='transformation_detail', + message='AutoMlForecastingInputs.Transformation.CategoricalTransformation', + ) + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof='transformation_detail', + message='AutoMlForecastingInputs.Transformation.TimestampTransformation', + ) + text = proto.Field( + proto.MESSAGE, + number=5, + oneof='transformation_detail', + message='AutoMlForecastingInputs.Transformation.TextTransformation', + ) + + class Granularity(proto.Message): + r"""A duration of time expressed in time granularity units. + + Attributes: + unit (str): + The time granularity unit of this time period. The supported + units are: + + - "minute" + + - "hour" + + - "day" + + - "week" + + - "month" + + - "year". + quantity (int): + The number of granularity_units between data points in the + training data. If ``granularity_unit`` is ``minute``, can be + 1, 5, 10, 15, or 30. For all other values of + ``granularity_unit``, must be 1. 
+ """ + + unit = proto.Field( + proto.STRING, + number=1, + ) + quantity = proto.Field( + proto.INT64, + number=2, + ) + + target_column = proto.Field( + proto.STRING, + number=1, + ) + time_series_identifier_column = proto.Field( + proto.STRING, + number=2, + ) + time_column = proto.Field( + proto.STRING, + number=3, + ) + transformations = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Transformation, + ) + optimization_objective = proto.Field( + proto.STRING, + number=5, + ) + train_budget_milli_node_hours = proto.Field( + proto.INT64, + number=6, + ) + weight_column = proto.Field( + proto.STRING, + number=7, + ) + time_series_attribute_columns = proto.RepeatedField( + proto.STRING, + number=19, + ) + unavailable_at_forecast_columns = proto.RepeatedField( + proto.STRING, + number=20, + ) + available_at_forecast_columns = proto.RepeatedField( + proto.STRING, + number=21, + ) + data_granularity = proto.Field( + proto.MESSAGE, + number=22, + message=Granularity, + ) + forecast_horizon = proto.Field( + proto.INT64, + number=23, + ) + context_window = proto.Field( + proto.INT64, + number=24, + ) + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=15, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + quantiles = proto.RepeatedField( + proto.DOUBLE, + number=16, + ) + validation_options = proto.Field( + proto.STRING, + number=17, + ) + additional_experiments = proto.RepeatedField( + proto.STRING, + number=25, + ) + + +class AutoMlForecastingMetadata(proto.Message): + r"""Model metadata specific to AutoML Forecasting. + + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed the train budget. 
+ """ + + train_cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py new file mode 100644 index 0000000000..c6000d576f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + }, +) + + +class AutoMlVideoActionRecognition(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video Action + Recognition Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoActionRecognitionInputs', + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py new file mode 100644 index 0000000000..d6c1fab00b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + }, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoClassificationInputs', + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..6474f8e8af --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + }, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoObjectTrackingInputs', + ) + + +class AutoMlVideoObjectTrackingInputs(proto.Message): + r""" + + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_CORAL_VERSATILE_1 = 3 + MOBILE_CORAL_LOW_LATENCY_1 = 4 + MOBILE_JETSON_VERSATILE_1 = 5 + MOBILE_JETSON_LOW_LATENCY_1 = 6 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py new file mode 100644 index 
0000000000..0c5426c029 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'ExportEvaluatedDataItemsConfig', + }, +) + + +class ExportEvaluatedDataItemsConfig(proto.Message): + r"""Configuration for exporting test set predictions to a + BigQuery table. + + Attributes: + destination_bigquery_uri (str): + URI of desired destination BigQuery table. Expected format: + bq://:: + + If not specified, then results are exported to the following + auto-created BigQuery table: + :export_evaluated_examples__.evaluated_examples + override_existing_table (bool): + If true and an export destination is + specified, then the contents of the destination + are overwritten. Otherwise, if the export + destination already exists, then the export + operation fails. 
+ """ + + destination_bigquery_uri = proto.Field( + proto.STRING, + number=1, + ) + override_existing_table = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py new file mode 100644 index 0000000000..e77d58adca --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py @@ -0,0 +1,964 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .services.dataset_service import DatasetServiceClient +from .services.dataset_service import DatasetServiceAsyncClient +from .services.endpoint_service import EndpointServiceClient +from .services.endpoint_service import EndpointServiceAsyncClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient +from .services.featurestore_service import FeaturestoreServiceClient +from .services.featurestore_service import FeaturestoreServiceAsyncClient +from .services.index_endpoint_service import IndexEndpointServiceClient +from .services.index_endpoint_service import IndexEndpointServiceAsyncClient +from .services.index_service import IndexServiceClient +from .services.index_service import IndexServiceAsyncClient +from .services.job_service import JobServiceClient +from .services.job_service import JobServiceAsyncClient +from .services.metadata_service import MetadataServiceClient +from .services.metadata_service import MetadataServiceAsyncClient +from .services.migration_service import MigrationServiceClient +from .services.migration_service import MigrationServiceAsyncClient +from .services.model_service import ModelServiceClient +from .services.model_service import ModelServiceAsyncClient +from .services.pipeline_service import PipelineServiceClient +from .services.pipeline_service import PipelineServiceAsyncClient +from .services.prediction_service import PredictionServiceClient +from .services.prediction_service import PredictionServiceAsyncClient +from .services.specialist_pool_service import SpecialistPoolServiceClient +from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from .services.tensorboard_service import TensorboardServiceClient +from .services.tensorboard_service import TensorboardServiceAsyncClient +from .services.vizier_service import VizierServiceClient +from 
.services.vizier_service import VizierServiceAsyncClient + +from .types.accelerator_type import AcceleratorType +from .types.annotation import Annotation +from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact +from .types.batch_prediction_job import BatchPredictionJob +from .types.completion_stats import CompletionStats +from .types.context import Context +from .types.custom_job import ContainerSpec +from .types.custom_job import CustomJob +from .types.custom_job import CustomJobSpec +from .types.custom_job import PythonPackageSpec +from .types.custom_job import Scheduling +from .types.custom_job import WorkerPoolSpec +from .types.data_item import DataItem +from .types.data_labeling_job import ActiveLearningConfig +from .types.data_labeling_job import DataLabelingJob +from .types.data_labeling_job import SampleConfig +from .types.data_labeling_job import TrainingConfig +from .types.dataset import Dataset +from .types.dataset import ExportDataConfig +from .types.dataset import ImportDataConfig +from .types.dataset_service import CreateDatasetOperationMetadata +from .types.dataset_service import CreateDatasetRequest +from .types.dataset_service import DeleteDatasetRequest +from .types.dataset_service import ExportDataOperationMetadata +from .types.dataset_service import ExportDataRequest +from .types.dataset_service import ExportDataResponse +from .types.dataset_service import GetAnnotationSpecRequest +from .types.dataset_service import GetDatasetRequest +from .types.dataset_service import ImportDataOperationMetadata +from .types.dataset_service import ImportDataRequest +from .types.dataset_service import ImportDataResponse +from .types.dataset_service import ListAnnotationsRequest +from .types.dataset_service import ListAnnotationsResponse +from .types.dataset_service import ListDataItemsRequest +from .types.dataset_service import ListDataItemsResponse +from .types.dataset_service import ListDatasetsRequest +from 
.types.dataset_service import ListDatasetsResponse +from .types.dataset_service import UpdateDatasetRequest +from .types.deployed_index_ref import DeployedIndexRef +from .types.deployed_model_ref import DeployedModelRef +from .types.encryption_spec import EncryptionSpec +from .types.endpoint import DeployedModel +from .types.endpoint import Endpoint +from .types.endpoint import PredictRequestResponseLoggingConfig +from .types.endpoint import PrivateEndpoints +from .types.endpoint_service import CreateEndpointOperationMetadata +from .types.endpoint_service import CreateEndpointRequest +from .types.endpoint_service import DeleteEndpointRequest +from .types.endpoint_service import DeployModelOperationMetadata +from .types.endpoint_service import DeployModelRequest +from .types.endpoint_service import DeployModelResponse +from .types.endpoint_service import GetEndpointRequest +from .types.endpoint_service import ListEndpointsRequest +from .types.endpoint_service import ListEndpointsResponse +from .types.endpoint_service import UndeployModelOperationMetadata +from .types.endpoint_service import UndeployModelRequest +from .types.endpoint_service import UndeployModelResponse +from .types.endpoint_service import UpdateEndpointRequest +from .types.entity_type import EntityType +from .types.env_var import EnvVar +from .types.event import Event +from .types.execution import Execution +from .types.explanation import Attribution +from .types.explanation import BlurBaselineConfig +from .types.explanation import Examples +from .types.explanation import ExamplesOverride +from .types.explanation import ExamplesRestrictionsNamespace +from .types.explanation import Explanation +from .types.explanation import ExplanationMetadataOverride +from .types.explanation import ExplanationParameters +from .types.explanation import ExplanationSpec +from .types.explanation import ExplanationSpecOverride +from .types.explanation import FeatureNoiseSigma +from .types.explanation import 
IntegratedGradientsAttribution +from .types.explanation import ModelExplanation +from .types.explanation import Neighbor +from .types.explanation import Presets +from .types.explanation import SampledShapleyAttribution +from .types.explanation import SmoothGradConfig +from .types.explanation import XraiAttribution +from .types.explanation_metadata import ExplanationMetadata +from .types.feature import Feature +from .types.feature_monitoring_stats import FeatureStatsAnomaly +from .types.feature_selector import FeatureSelector +from .types.feature_selector import IdMatcher +from .types.featurestore import Featurestore +from .types.featurestore_monitoring import FeaturestoreMonitoringConfig +from .types.featurestore_online_service import FeatureValue +from .types.featurestore_online_service import FeatureValueList +from .types.featurestore_online_service import ReadFeatureValuesRequest +from .types.featurestore_online_service import ReadFeatureValuesResponse +from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_service import BatchCreateFeaturesOperationMetadata +from .types.featurestore_service import BatchCreateFeaturesRequest +from .types.featurestore_service import BatchCreateFeaturesResponse +from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from .types.featurestore_service import BatchReadFeatureValuesRequest +from .types.featurestore_service import BatchReadFeatureValuesResponse +from .types.featurestore_service import CreateEntityTypeOperationMetadata +from .types.featurestore_service import CreateEntityTypeRequest +from .types.featurestore_service import CreateFeatureOperationMetadata +from .types.featurestore_service import CreateFeatureRequest +from .types.featurestore_service import CreateFeaturestoreOperationMetadata +from .types.featurestore_service import CreateFeaturestoreRequest +from .types.featurestore_service import DeleteEntityTypeRequest +from 
.types.featurestore_service import DeleteFeatureRequest +from .types.featurestore_service import DeleteFeaturestoreRequest +from .types.featurestore_service import DestinationFeatureSetting +from .types.featurestore_service import ExportFeatureValuesOperationMetadata +from .types.featurestore_service import ExportFeatureValuesRequest +from .types.featurestore_service import ExportFeatureValuesResponse +from .types.featurestore_service import FeatureValueDestination +from .types.featurestore_service import GetEntityTypeRequest +from .types.featurestore_service import GetFeatureRequest +from .types.featurestore_service import GetFeaturestoreRequest +from .types.featurestore_service import ImportFeatureValuesOperationMetadata +from .types.featurestore_service import ImportFeatureValuesRequest +from .types.featurestore_service import ImportFeatureValuesResponse +from .types.featurestore_service import ListEntityTypesRequest +from .types.featurestore_service import ListEntityTypesResponse +from .types.featurestore_service import ListFeaturesRequest +from .types.featurestore_service import ListFeaturesResponse +from .types.featurestore_service import ListFeaturestoresRequest +from .types.featurestore_service import ListFeaturestoresResponse +from .types.featurestore_service import SearchFeaturesRequest +from .types.featurestore_service import SearchFeaturesResponse +from .types.featurestore_service import UpdateEntityTypeRequest +from .types.featurestore_service import UpdateFeatureRequest +from .types.featurestore_service import UpdateFeaturestoreOperationMetadata +from .types.featurestore_service import UpdateFeaturestoreRequest +from .types.hyperparameter_tuning_job import HyperparameterTuningJob +from .types.index import Index +from .types.index_endpoint import DeployedIndex +from .types.index_endpoint import DeployedIndexAuthConfig +from .types.index_endpoint import IndexEndpoint +from .types.index_endpoint import IndexPrivateEndpoints +from 
.types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from .types.index_endpoint_service import CreateIndexEndpointRequest +from .types.index_endpoint_service import DeleteIndexEndpointRequest +from .types.index_endpoint_service import DeployIndexOperationMetadata +from .types.index_endpoint_service import DeployIndexRequest +from .types.index_endpoint_service import DeployIndexResponse +from .types.index_endpoint_service import GetIndexEndpointRequest +from .types.index_endpoint_service import ListIndexEndpointsRequest +from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from .types.index_endpoint_service import MutateDeployedIndexRequest +from .types.index_endpoint_service import MutateDeployedIndexResponse +from .types.index_endpoint_service import UndeployIndexOperationMetadata +from .types.index_endpoint_service import UndeployIndexRequest +from .types.index_endpoint_service import UndeployIndexResponse +from .types.index_endpoint_service import UpdateIndexEndpointRequest +from .types.index_service import CreateIndexOperationMetadata +from .types.index_service import CreateIndexRequest +from .types.index_service import DeleteIndexRequest +from .types.index_service import GetIndexRequest +from .types.index_service import ListIndexesRequest +from .types.index_service import ListIndexesResponse +from .types.index_service import NearestNeighborSearchOperationMetadata +from .types.index_service import UpdateIndexOperationMetadata +from .types.index_service import UpdateIndexRequest +from .types.io import AvroSource +from .types.io import BigQueryDestination +from .types.io import BigQuerySource +from .types.io import ContainerRegistryDestination +from .types.io import CsvDestination +from .types.io import CsvSource +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import TFRecordDestination +from 
.types.job_service import CancelBatchPredictionJobRequest +from .types.job_service import CancelCustomJobRequest +from .types.job_service import CancelDataLabelingJobRequest +from .types.job_service import CancelHyperparameterTuningJobRequest +from .types.job_service import CreateBatchPredictionJobRequest +from .types.job_service import CreateCustomJobRequest +from .types.job_service import CreateDataLabelingJobRequest +from .types.job_service import CreateHyperparameterTuningJobRequest +from .types.job_service import CreateModelDeploymentMonitoringJobRequest +from .types.job_service import DeleteBatchPredictionJobRequest +from .types.job_service import DeleteCustomJobRequest +from .types.job_service import DeleteDataLabelingJobRequest +from .types.job_service import DeleteHyperparameterTuningJobRequest +from .types.job_service import DeleteModelDeploymentMonitoringJobRequest +from .types.job_service import GetBatchPredictionJobRequest +from .types.job_service import GetCustomJobRequest +from .types.job_service import GetDataLabelingJobRequest +from .types.job_service import GetHyperparameterTuningJobRequest +from .types.job_service import GetModelDeploymentMonitoringJobRequest +from .types.job_service import ListBatchPredictionJobsRequest +from .types.job_service import ListBatchPredictionJobsResponse +from .types.job_service import ListCustomJobsRequest +from .types.job_service import ListCustomJobsResponse +from .types.job_service import ListDataLabelingJobsRequest +from .types.job_service import ListDataLabelingJobsResponse +from .types.job_service import ListHyperparameterTuningJobsRequest +from .types.job_service import ListHyperparameterTuningJobsResponse +from .types.job_service import ListModelDeploymentMonitoringJobsRequest +from .types.job_service import ListModelDeploymentMonitoringJobsResponse +from .types.job_service import PauseModelDeploymentMonitoringJobRequest +from .types.job_service import ResumeModelDeploymentMonitoringJobRequest +from 
.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from .types.job_service import UpdateModelDeploymentMonitoringJobRequest +from .types.job_state import JobState +from .types.lineage_subgraph import LineageSubgraph +from .types.machine_resources import AutomaticResources +from .types.machine_resources import AutoscalingMetricSpec +from .types.machine_resources import BatchDedicatedResources +from .types.machine_resources import DedicatedResources +from .types.machine_resources import DiskSpec +from .types.machine_resources import MachineSpec +from .types.machine_resources import NfsMount +from .types.machine_resources import ResourcesConsumed +from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.metadata_schema import MetadataSchema +from .types.metadata_service import AddContextArtifactsAndExecutionsRequest +from .types.metadata_service import AddContextArtifactsAndExecutionsResponse +from .types.metadata_service import AddContextChildrenRequest +from .types.metadata_service import AddContextChildrenResponse +from .types.metadata_service import AddExecutionEventsRequest +from .types.metadata_service import AddExecutionEventsResponse +from .types.metadata_service import CreateArtifactRequest +from .types.metadata_service import CreateContextRequest +from .types.metadata_service import CreateExecutionRequest +from .types.metadata_service import CreateMetadataSchemaRequest +from .types.metadata_service import CreateMetadataStoreOperationMetadata +from .types.metadata_service import CreateMetadataStoreRequest +from .types.metadata_service import DeleteArtifactRequest +from .types.metadata_service import DeleteContextRequest +from .types.metadata_service import DeleteExecutionRequest +from .types.metadata_service import 
DeleteMetadataStoreOperationMetadata +from .types.metadata_service import DeleteMetadataStoreRequest +from .types.metadata_service import GetArtifactRequest +from .types.metadata_service import GetContextRequest +from .types.metadata_service import GetExecutionRequest +from .types.metadata_service import GetMetadataSchemaRequest +from .types.metadata_service import GetMetadataStoreRequest +from .types.metadata_service import ListArtifactsRequest +from .types.metadata_service import ListArtifactsResponse +from .types.metadata_service import ListContextsRequest +from .types.metadata_service import ListContextsResponse +from .types.metadata_service import ListExecutionsRequest +from .types.metadata_service import ListExecutionsResponse +from .types.metadata_service import ListMetadataSchemasRequest +from .types.metadata_service import ListMetadataSchemasResponse +from .types.metadata_service import ListMetadataStoresRequest +from .types.metadata_service import ListMetadataStoresResponse +from .types.metadata_service import PurgeArtifactsMetadata +from .types.metadata_service import PurgeArtifactsRequest +from .types.metadata_service import PurgeArtifactsResponse +from .types.metadata_service import PurgeContextsMetadata +from .types.metadata_service import PurgeContextsRequest +from .types.metadata_service import PurgeContextsResponse +from .types.metadata_service import PurgeExecutionsMetadata +from .types.metadata_service import PurgeExecutionsRequest +from .types.metadata_service import PurgeExecutionsResponse +from .types.metadata_service import QueryArtifactLineageSubgraphRequest +from .types.metadata_service import QueryContextLineageSubgraphRequest +from .types.metadata_service import QueryExecutionInputsAndOutputsRequest +from .types.metadata_service import UpdateArtifactRequest +from .types.metadata_service import UpdateContextRequest +from .types.metadata_service import UpdateExecutionRequest +from .types.metadata_store import MetadataStore +from 
.types.migratable_resource import MigratableResource +from .types.migration_service import BatchMigrateResourcesOperationMetadata +from .types.migration_service import BatchMigrateResourcesRequest +from .types.migration_service import BatchMigrateResourcesResponse +from .types.migration_service import MigrateResourceRequest +from .types.migration_service import MigrateResourceResponse +from .types.migration_service import SearchMigratableResourcesRequest +from .types.migration_service import SearchMigratableResourcesResponse +from .types.model import Model +from .types.model import ModelContainerSpec +from .types.model import Port +from .types.model import PredictSchemata +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from .types.model_evaluation import ModelEvaluation +from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_monitoring import ModelMonitoringAlertConfig +from .types.model_monitoring import ModelMonitoringObjectiveConfig +from .types.model_monitoring import SamplingStrategy +from .types.model_monitoring import ThresholdConfig +from .types.model_service import DeleteModelRequest +from .types.model_service import DeleteModelVersionRequest +from .types.model_service import ExportModelOperationMetadata +from .types.model_service import ExportModelRequest +from .types.model_service import ExportModelResponse +from .types.model_service import GetModelEvaluationRequest +from .types.model_service import GetModelEvaluationSliceRequest +from 
.types.model_service import GetModelRequest +from .types.model_service import ImportModelEvaluationRequest +from .types.model_service import ListModelEvaluationSlicesRequest +from .types.model_service import ListModelEvaluationSlicesResponse +from .types.model_service import ListModelEvaluationsRequest +from .types.model_service import ListModelEvaluationsResponse +from .types.model_service import ListModelsRequest +from .types.model_service import ListModelsResponse +from .types.model_service import ListModelVersionsRequest +from .types.model_service import ListModelVersionsResponse +from .types.model_service import MergeVersionAliasesRequest +from .types.model_service import UpdateExplanationDatasetOperationMetadata +from .types.model_service import UpdateExplanationDatasetRequest +from .types.model_service import UpdateExplanationDatasetResponse +from .types.model_service import UpdateModelRequest +from .types.model_service import UploadModelOperationMetadata +from .types.model_service import UploadModelRequest +from .types.model_service import UploadModelResponse +from .types.operation import DeleteOperationMetadata +from .types.operation import GenericOperationMetadata +from .types.pipeline_failure_policy import PipelineFailurePolicy +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_job import PipelineTemplateMetadata +from .types.pipeline_service import CancelPipelineJobRequest +from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest +from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest +from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest +from 
.types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse +from .types.pipeline_service import ListTrainingPipelinesRequest +from .types.pipeline_service import ListTrainingPipelinesResponse +from .types.pipeline_state import PipelineState +from .types.prediction_service import ExplainRequest +from .types.prediction_service import ExplainResponse +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.prediction_service import RawPredictRequest +from .types.specialist_pool import SpecialistPool +from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import CreateSpecialistPoolRequest +from .types.specialist_pool_service import DeleteSpecialistPoolRequest +from .types.specialist_pool_service import GetSpecialistPoolRequest +from .types.specialist_pool_service import ListSpecialistPoolsRequest +from .types.specialist_pool_service import ListSpecialistPoolsResponse +from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import UpdateSpecialistPoolRequest +from .types.study import Measurement +from .types.study import Study +from .types.study import StudySpec +from .types.study import Trial +from .types.tensorboard import Tensorboard +from .types.tensorboard_data import Scalar +from .types.tensorboard_data import TensorboardBlob +from .types.tensorboard_data import TensorboardBlobSequence +from .types.tensorboard_data import TensorboardTensor +from .types.tensorboard_data import TimeSeriesData +from .types.tensorboard_data import TimeSeriesDataPoint +from .types.tensorboard_experiment import TensorboardExperiment +from .types.tensorboard_run import TensorboardRun +from .types.tensorboard_service import BatchCreateTensorboardRunsRequest +from 
.types.tensorboard_service import BatchCreateTensorboardRunsResponse +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import CreateTensorboardExperimentRequest +from .types.tensorboard_service import CreateTensorboardOperationMetadata +from .types.tensorboard_service import CreateTensorboardRequest +from .types.tensorboard_service import CreateTensorboardRunRequest +from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import DeleteTensorboardExperimentRequest +from .types.tensorboard_service import DeleteTensorboardRequest +from .types.tensorboard_service import DeleteTensorboardRunRequest +from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import GetTensorboardExperimentRequest +from .types.tensorboard_service import GetTensorboardRequest +from .types.tensorboard_service import GetTensorboardRunRequest +from .types.tensorboard_service import GetTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardExperimentsRequest +from .types.tensorboard_service import ListTensorboardExperimentsResponse +from .types.tensorboard_service import ListTensorboardRunsRequest +from .types.tensorboard_service import ListTensorboardRunsResponse +from .types.tensorboard_service import ListTensorboardsRequest +from .types.tensorboard_service import ListTensorboardsResponse +from .types.tensorboard_service import ListTensorboardTimeSeriesRequest +from .types.tensorboard_service import 
ListTensorboardTimeSeriesResponse +from .types.tensorboard_service import ReadTensorboardBlobDataRequest +from .types.tensorboard_service import ReadTensorboardBlobDataResponse +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import UpdateTensorboardExperimentRequest +from .types.tensorboard_service import UpdateTensorboardOperationMetadata +from .types.tensorboard_service import UpdateTensorboardRequest +from .types.tensorboard_service import UpdateTensorboardRunRequest +from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataRequest +from .types.tensorboard_service import WriteTensorboardExperimentDataResponse +from .types.tensorboard_service import WriteTensorboardRunDataRequest +from .types.tensorboard_service import WriteTensorboardRunDataResponse +from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.training_pipeline import FilterSplit +from .types.training_pipeline import FractionSplit +from .types.training_pipeline import InputDataConfig +from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import StratifiedSplit +from .types.training_pipeline import TimestampSplit +from .types.training_pipeline import TrainingPipeline +from .types.types import BoolArray +from .types.types import DoubleArray +from .types.types import Int64Array +from .types.types import StringArray +from .types.unmanaged_container_model import UnmanagedContainerModel +from .types.user_action_reference import UserActionReference +from .types.value import Value +from .types.vizier_service import AddTrialMeasurementRequest +from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from .types.vizier_service import CheckTrialEarlyStoppingStateRequest +from .types.vizier_service import 
CheckTrialEarlyStoppingStateResponse +from .types.vizier_service import CompleteTrialRequest +from .types.vizier_service import CreateStudyRequest +from .types.vizier_service import CreateTrialRequest +from .types.vizier_service import DeleteStudyRequest +from .types.vizier_service import DeleteTrialRequest +from .types.vizier_service import GetStudyRequest +from .types.vizier_service import GetTrialRequest +from .types.vizier_service import ListOptimalTrialsRequest +from .types.vizier_service import ListOptimalTrialsResponse +from .types.vizier_service import ListStudiesRequest +from .types.vizier_service import ListStudiesResponse +from .types.vizier_service import ListTrialsRequest +from .types.vizier_service import ListTrialsResponse +from .types.vizier_service import LookupStudyRequest +from .types.vizier_service import StopTrialRequest +from .types.vizier_service import SuggestTrialsMetadata +from .types.vizier_service import SuggestTrialsRequest +from .types.vizier_service import SuggestTrialsResponse + +__all__ = ( + 'DatasetServiceAsyncClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceAsyncClient', + 'JobServiceAsyncClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceAsyncClient', + 'ModelServiceAsyncClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceAsyncClient', +'AcceleratorType', +'ActiveLearningConfig', +'AddContextArtifactsAndExecutionsRequest', +'AddContextArtifactsAndExecutionsResponse', +'AddContextChildrenRequest', +'AddContextChildrenResponse', +'AddExecutionEventsRequest', +'AddExecutionEventsResponse', +'AddTrialMeasurementRequest', +'Annotation', +'AnnotationSpec', +'Artifact', +'Attribution', +'AutomaticResources', +'AutoscalingMetricSpec', +'AvroSource', +'BatchCreateFeaturesOperationMetadata', 
+'BatchCreateFeaturesRequest', +'BatchCreateFeaturesResponse', +'BatchCreateTensorboardRunsRequest', +'BatchCreateTensorboardRunsResponse', +'BatchCreateTensorboardTimeSeriesRequest', +'BatchCreateTensorboardTimeSeriesResponse', +'BatchDedicatedResources', +'BatchMigrateResourcesOperationMetadata', +'BatchMigrateResourcesRequest', +'BatchMigrateResourcesResponse', +'BatchPredictionJob', +'BatchReadFeatureValuesOperationMetadata', +'BatchReadFeatureValuesRequest', +'BatchReadFeatureValuesResponse', +'BatchReadTensorboardTimeSeriesDataRequest', +'BatchReadTensorboardTimeSeriesDataResponse', +'BigQueryDestination', +'BigQuerySource', +'BlurBaselineConfig', +'BoolArray', +'CancelBatchPredictionJobRequest', +'CancelCustomJobRequest', +'CancelDataLabelingJobRequest', +'CancelHyperparameterTuningJobRequest', +'CancelPipelineJobRequest', +'CancelTrainingPipelineRequest', +'CheckTrialEarlyStoppingStateMetatdata', +'CheckTrialEarlyStoppingStateRequest', +'CheckTrialEarlyStoppingStateResponse', +'CompleteTrialRequest', +'CompletionStats', +'ContainerRegistryDestination', +'ContainerSpec', +'Context', +'CreateArtifactRequest', +'CreateBatchPredictionJobRequest', +'CreateContextRequest', +'CreateCustomJobRequest', +'CreateDataLabelingJobRequest', +'CreateDatasetOperationMetadata', +'CreateDatasetRequest', +'CreateEndpointOperationMetadata', +'CreateEndpointRequest', +'CreateEntityTypeOperationMetadata', +'CreateEntityTypeRequest', +'CreateExecutionRequest', +'CreateFeatureOperationMetadata', +'CreateFeatureRequest', +'CreateFeaturestoreOperationMetadata', +'CreateFeaturestoreRequest', +'CreateHyperparameterTuningJobRequest', +'CreateIndexEndpointOperationMetadata', +'CreateIndexEndpointRequest', +'CreateIndexOperationMetadata', +'CreateIndexRequest', +'CreateMetadataSchemaRequest', +'CreateMetadataStoreOperationMetadata', +'CreateMetadataStoreRequest', +'CreateModelDeploymentMonitoringJobRequest', +'CreatePipelineJobRequest', +'CreateSpecialistPoolOperationMetadata', 
+'CreateSpecialistPoolRequest', +'CreateStudyRequest', +'CreateTensorboardExperimentRequest', +'CreateTensorboardOperationMetadata', +'CreateTensorboardRequest', +'CreateTensorboardRunRequest', +'CreateTensorboardTimeSeriesRequest', +'CreateTrainingPipelineRequest', +'CreateTrialRequest', +'CsvDestination', +'CsvSource', +'CustomJob', +'CustomJobSpec', +'DataItem', +'DataLabelingJob', +'Dataset', +'DatasetServiceClient', +'DedicatedResources', +'DeleteArtifactRequest', +'DeleteBatchPredictionJobRequest', +'DeleteContextRequest', +'DeleteCustomJobRequest', +'DeleteDataLabelingJobRequest', +'DeleteDatasetRequest', +'DeleteEndpointRequest', +'DeleteEntityTypeRequest', +'DeleteExecutionRequest', +'DeleteFeatureRequest', +'DeleteFeaturestoreRequest', +'DeleteHyperparameterTuningJobRequest', +'DeleteIndexEndpointRequest', +'DeleteIndexRequest', +'DeleteMetadataStoreOperationMetadata', +'DeleteMetadataStoreRequest', +'DeleteModelDeploymentMonitoringJobRequest', +'DeleteModelRequest', +'DeleteModelVersionRequest', +'DeleteOperationMetadata', +'DeletePipelineJobRequest', +'DeleteSpecialistPoolRequest', +'DeleteStudyRequest', +'DeleteTensorboardExperimentRequest', +'DeleteTensorboardRequest', +'DeleteTensorboardRunRequest', +'DeleteTensorboardTimeSeriesRequest', +'DeleteTrainingPipelineRequest', +'DeleteTrialRequest', +'DeployIndexOperationMetadata', +'DeployIndexRequest', +'DeployIndexResponse', +'DeployModelOperationMetadata', +'DeployModelRequest', +'DeployModelResponse', +'DeployedIndex', +'DeployedIndexAuthConfig', +'DeployedIndexRef', +'DeployedModel', +'DeployedModelRef', +'DestinationFeatureSetting', +'DiskSpec', +'DoubleArray', +'EncryptionSpec', +'Endpoint', +'EndpointServiceClient', +'EntityType', +'EnvVar', +'Event', +'Examples', +'ExamplesOverride', +'ExamplesRestrictionsNamespace', +'Execution', +'ExplainRequest', +'ExplainResponse', +'Explanation', +'ExplanationMetadata', +'ExplanationMetadataOverride', +'ExplanationParameters', +'ExplanationSpec', 
+'ExplanationSpecOverride', +'ExportDataConfig', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportDataResponse', +'ExportFeatureValuesOperationMetadata', +'ExportFeatureValuesRequest', +'ExportFeatureValuesResponse', +'ExportModelOperationMetadata', +'ExportModelRequest', +'ExportModelResponse', +'ExportTensorboardTimeSeriesDataRequest', +'ExportTensorboardTimeSeriesDataResponse', +'Feature', +'FeatureNoiseSigma', +'FeatureSelector', +'FeatureStatsAnomaly', +'FeatureValue', +'FeatureValueDestination', +'FeatureValueList', +'Featurestore', +'FeaturestoreMonitoringConfig', +'FeaturestoreOnlineServingServiceClient', +'FeaturestoreServiceClient', +'FilterSplit', +'FractionSplit', +'GcsDestination', +'GcsSource', +'GenericOperationMetadata', +'GetAnnotationSpecRequest', +'GetArtifactRequest', +'GetBatchPredictionJobRequest', +'GetContextRequest', +'GetCustomJobRequest', +'GetDataLabelingJobRequest', +'GetDatasetRequest', +'GetEndpointRequest', +'GetEntityTypeRequest', +'GetExecutionRequest', +'GetFeatureRequest', +'GetFeaturestoreRequest', +'GetHyperparameterTuningJobRequest', +'GetIndexEndpointRequest', +'GetIndexRequest', +'GetMetadataSchemaRequest', +'GetMetadataStoreRequest', +'GetModelDeploymentMonitoringJobRequest', +'GetModelEvaluationRequest', +'GetModelEvaluationSliceRequest', +'GetModelRequest', +'GetPipelineJobRequest', +'GetSpecialistPoolRequest', +'GetStudyRequest', +'GetTensorboardExperimentRequest', +'GetTensorboardRequest', +'GetTensorboardRunRequest', +'GetTensorboardTimeSeriesRequest', +'GetTrainingPipelineRequest', +'GetTrialRequest', +'HyperparameterTuningJob', +'IdMatcher', +'ImportDataConfig', +'ImportDataOperationMetadata', +'ImportDataRequest', +'ImportDataResponse', +'ImportFeatureValuesOperationMetadata', +'ImportFeatureValuesRequest', +'ImportFeatureValuesResponse', +'ImportModelEvaluationRequest', +'Index', +'IndexEndpoint', +'IndexEndpointServiceClient', +'IndexPrivateEndpoints', +'IndexServiceClient', +'InputDataConfig', 
+'Int64Array', +'IntegratedGradientsAttribution', +'JobServiceClient', +'JobState', +'LineageSubgraph', +'ListAnnotationsRequest', +'ListAnnotationsResponse', +'ListArtifactsRequest', +'ListArtifactsResponse', +'ListBatchPredictionJobsRequest', +'ListBatchPredictionJobsResponse', +'ListContextsRequest', +'ListContextsResponse', +'ListCustomJobsRequest', +'ListCustomJobsResponse', +'ListDataItemsRequest', +'ListDataItemsResponse', +'ListDataLabelingJobsRequest', +'ListDataLabelingJobsResponse', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListEndpointsRequest', +'ListEndpointsResponse', +'ListEntityTypesRequest', +'ListEntityTypesResponse', +'ListExecutionsRequest', +'ListExecutionsResponse', +'ListFeaturesRequest', +'ListFeaturesResponse', +'ListFeaturestoresRequest', +'ListFeaturestoresResponse', +'ListHyperparameterTuningJobsRequest', +'ListHyperparameterTuningJobsResponse', +'ListIndexEndpointsRequest', +'ListIndexEndpointsResponse', +'ListIndexesRequest', +'ListIndexesResponse', +'ListMetadataSchemasRequest', +'ListMetadataSchemasResponse', +'ListMetadataStoresRequest', +'ListMetadataStoresResponse', +'ListModelDeploymentMonitoringJobsRequest', +'ListModelDeploymentMonitoringJobsResponse', +'ListModelEvaluationSlicesRequest', +'ListModelEvaluationSlicesResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', +'ListModelVersionsRequest', +'ListModelVersionsResponse', +'ListModelsRequest', +'ListModelsResponse', +'ListOptimalTrialsRequest', +'ListOptimalTrialsResponse', +'ListPipelineJobsRequest', +'ListPipelineJobsResponse', +'ListSpecialistPoolsRequest', +'ListSpecialistPoolsResponse', +'ListStudiesRequest', +'ListStudiesResponse', +'ListTensorboardExperimentsRequest', +'ListTensorboardExperimentsResponse', +'ListTensorboardRunsRequest', +'ListTensorboardRunsResponse', +'ListTensorboardTimeSeriesRequest', +'ListTensorboardTimeSeriesResponse', +'ListTensorboardsRequest', +'ListTensorboardsResponse', +'ListTrainingPipelinesRequest', 
+'ListTrainingPipelinesResponse', +'ListTrialsRequest', +'ListTrialsResponse', +'LookupStudyRequest', +'MachineSpec', +'ManualBatchTuningParameters', +'Measurement', +'MergeVersionAliasesRequest', +'MetadataSchema', +'MetadataServiceClient', +'MetadataStore', +'MigratableResource', +'MigrateResourceRequest', +'MigrateResourceResponse', +'MigrationServiceClient', +'Model', +'ModelContainerSpec', +'ModelDeploymentMonitoringBigQueryTable', +'ModelDeploymentMonitoringJob', +'ModelDeploymentMonitoringObjectiveConfig', +'ModelDeploymentMonitoringObjectiveType', +'ModelDeploymentMonitoringScheduleConfig', +'ModelEvaluation', +'ModelEvaluationSlice', +'ModelExplanation', +'ModelMonitoringAlertConfig', +'ModelMonitoringObjectiveConfig', +'ModelMonitoringStatsAnomalies', +'ModelServiceClient', +'MutateDeployedIndexOperationMetadata', +'MutateDeployedIndexRequest', +'MutateDeployedIndexResponse', +'NearestNeighborSearchOperationMetadata', +'Neighbor', +'NfsMount', +'PauseModelDeploymentMonitoringJobRequest', +'PipelineFailurePolicy', +'PipelineJob', +'PipelineJobDetail', +'PipelineServiceClient', +'PipelineState', +'PipelineTaskDetail', +'PipelineTaskExecutorDetail', +'PipelineTemplateMetadata', +'Port', +'PredefinedSplit', +'PredictRequest', +'PredictRequestResponseLoggingConfig', +'PredictResponse', +'PredictSchemata', +'PredictionServiceClient', +'Presets', +'PrivateEndpoints', +'PurgeArtifactsMetadata', +'PurgeArtifactsRequest', +'PurgeArtifactsResponse', +'PurgeContextsMetadata', +'PurgeContextsRequest', +'PurgeContextsResponse', +'PurgeExecutionsMetadata', +'PurgeExecutionsRequest', +'PurgeExecutionsResponse', +'PythonPackageSpec', +'QueryArtifactLineageSubgraphRequest', +'QueryContextLineageSubgraphRequest', +'QueryExecutionInputsAndOutputsRequest', +'RawPredictRequest', +'ReadFeatureValuesRequest', +'ReadFeatureValuesResponse', +'ReadTensorboardBlobDataRequest', +'ReadTensorboardBlobDataResponse', +'ReadTensorboardTimeSeriesDataRequest', 
+'ReadTensorboardTimeSeriesDataResponse', +'ResourcesConsumed', +'ResumeModelDeploymentMonitoringJobRequest', +'SampleConfig', +'SampledShapleyAttribution', +'SamplingStrategy', +'Scalar', +'Scheduling', +'SearchFeaturesRequest', +'SearchFeaturesResponse', +'SearchMigratableResourcesRequest', +'SearchMigratableResourcesResponse', +'SearchModelDeploymentMonitoringStatsAnomaliesRequest', +'SearchModelDeploymentMonitoringStatsAnomaliesResponse', +'SmoothGradConfig', +'SpecialistPool', +'SpecialistPoolServiceClient', +'StopTrialRequest', +'StratifiedSplit', +'StreamingReadFeatureValuesRequest', +'StringArray', +'Study', +'StudySpec', +'SuggestTrialsMetadata', +'SuggestTrialsRequest', +'SuggestTrialsResponse', +'TFRecordDestination', +'Tensorboard', +'TensorboardBlob', +'TensorboardBlobSequence', +'TensorboardExperiment', +'TensorboardRun', +'TensorboardServiceClient', +'TensorboardTensor', +'TensorboardTimeSeries', +'ThresholdConfig', +'TimeSeriesData', +'TimeSeriesDataPoint', +'TimestampSplit', +'TrainingConfig', +'TrainingPipeline', +'Trial', +'UndeployIndexOperationMetadata', +'UndeployIndexRequest', +'UndeployIndexResponse', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UndeployModelResponse', +'UnmanagedContainerModel', +'UpdateArtifactRequest', +'UpdateContextRequest', +'UpdateDatasetRequest', +'UpdateEndpointRequest', +'UpdateEntityTypeRequest', +'UpdateExecutionRequest', +'UpdateExplanationDatasetOperationMetadata', +'UpdateExplanationDatasetRequest', +'UpdateExplanationDatasetResponse', +'UpdateFeatureRequest', +'UpdateFeaturestoreOperationMetadata', +'UpdateFeaturestoreRequest', +'UpdateIndexEndpointRequest', +'UpdateIndexOperationMetadata', +'UpdateIndexRequest', +'UpdateModelDeploymentMonitoringJobOperationMetadata', +'UpdateModelDeploymentMonitoringJobRequest', +'UpdateModelRequest', +'UpdateSpecialistPoolOperationMetadata', +'UpdateSpecialistPoolRequest', +'UpdateTensorboardExperimentRequest', +'UpdateTensorboardOperationMetadata', 
+'UpdateTensorboardRequest', +'UpdateTensorboardRunRequest', +'UpdateTensorboardTimeSeriesRequest', +'UploadModelOperationMetadata', +'UploadModelRequest', +'UploadModelResponse', +'UserActionReference', +'Value', +'VizierServiceClient', +'WorkerPoolSpec', +'WriteTensorboardExperimentDataRequest', +'WriteTensorboardExperimentDataResponse', +'WriteTensorboardRunDataRequest', +'WriteTensorboardRunDataResponse', +'XraiAttribution', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..777e311ce7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -0,0 +1,2109 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1", + "schema": "1.0", + "services": { + "DatasetService": { + "clients": { + "grpc": { + "libraryClient": "DatasetServiceClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DatasetServiceAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + 
"ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + } + } + }, + "EndpointService": { + "clients": { + "grpc": { + "libraryClient": "EndpointServiceClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "EndpointServiceAsyncClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + } + } + }, + "FeaturestoreOnlineServingService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreOnlineServingServiceClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + }, + "grpc-async": { + "libraryClient": 
"FeaturestoreOnlineServingServiceAsyncClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + } + } + }, + "FeaturestoreService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreServiceClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreServiceAsyncClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" 
+ ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + } + } + }, + "IndexEndpointService": { + "clients": { + "grpc": { + "libraryClient": "IndexEndpointServiceClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + "methods": [ + "delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + }, + "grpc-async": { + 
"libraryClient": "IndexEndpointServiceAsyncClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + "methods": [ + "delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + } + } + }, + "IndexService": { + "clients": { + "grpc": { + "libraryClient": "IndexServiceClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + }, + "grpc-async": { + "libraryClient": "IndexServiceAsyncClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + } + } + }, + "JobService": { + "clients": { + "grpc": { + "libraryClient": "JobServiceClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + 
"methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + "ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + "resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + 
"UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobServiceAsyncClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + 
"ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + "ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + "resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + "UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + } + } + }, + "MetadataService": { + "clients": { + "grpc": { + "libraryClient": "MetadataServiceClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + 
"GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MetadataServiceAsyncClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteArtifact": { + "methods": [ + "delete_artifact" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteExecution": { + "methods": [ + "delete_execution" + ] + }, + 
"DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "PurgeArtifacts": { + "methods": [ + "purge_artifacts" + ] + }, + "PurgeContexts": { + "methods": [ + "purge_contexts" + ] + }, + "PurgeExecutions": { + "methods": [ + "purge_executions" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + } + } + }, + "MigrationService": { + "clients": { + "grpc": { + "libraryClient": "MigrationServiceClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MigrationServiceAsyncClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + } + } + }, + 
"ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, + "UpdateExplanationDataset": { + "methods": [ + "update_explanation_dataset" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "DeleteModelVersion": { + "methods": [ + "delete_model_version" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModelVersions": { + "methods": [ + "list_model_versions" + ] + }, + 
"ListModels": { + "methods": [ + "list_models" + ] + }, + "MergeVersionAliases": { + "methods": [ + "merge_version_aliases" + ] + }, + "UpdateExplanationDataset": { + "methods": [ + "update_explanation_dataset" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + } + } + }, + "PipelineService": { + "clients": { + "grpc": { + "libraryClient": "PipelineServiceClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PipelineServiceAsyncClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + 
"ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + }, + "RawPredict": { + "methods": [ + "raw_predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + }, + "RawPredict": { + "methods": [ + "raw_predict" + ] + } + } + } + } + }, + "SpecialistPoolService": { + "clients": { + "grpc": { + "libraryClient": "SpecialistPoolServiceClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SpecialistPoolServiceAsyncClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + } + } + }, + "TensorboardService": { + "clients": { + "grpc": { + "libraryClient": "TensorboardServiceClient", + "rpcs": { + "BatchCreateTensorboardRuns": { + "methods": [ + "batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + 
"batch_read_tensorboard_time_series_data" + ] + }, + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": [ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + }, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + "update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + "UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + 
"WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + }, + "grpc-async": { + "libraryClient": "TensorboardServiceAsyncClient", + "rpcs": { + "BatchCreateTensorboardRuns": { + "methods": [ + "batch_create_tensorboard_runs" + ] + }, + "BatchCreateTensorboardTimeSeries": { + "methods": [ + "batch_create_tensorboard_time_series" + ] + }, + "BatchReadTensorboardTimeSeriesData": { + "methods": [ + "batch_read_tensorboard_time_series_data" + ] + }, + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": [ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + 
}, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + "update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + "UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + "WriteTensorboardExperimentData": { + "methods": [ + "write_tensorboard_experiment_data" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + } + } + }, + "VizierService": { + "clients": { + "grpc": { + "libraryClient": "VizierServiceClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + "CheckTrialEarlyStoppingState": { + "methods": [ + "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + "methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + }, + "grpc-async": { + "libraryClient": "VizierServiceAsyncClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + "CheckTrialEarlyStoppingState": { + "methods": [ 
+ "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + "methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py new file mode 100644 index 0000000000..e8e1c3845d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py new file mode 100644 index 0000000000..3597de859d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import DatasetServiceClient +from .async_client import DatasetServiceAsyncClient + +__all__ = ( + 'DatasetServiceClient', + 'DatasetServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py new file mode 100644 index 0000000000..758dbf92af --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -0,0 +1,1989 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import 
DatasetServiceGrpcAsyncIOTransport +from .client import DatasetServiceClient + + +class DatasetServiceAsyncClient: + """The service that handles the CRUD of Vertex AI Dataset and + its child resources. + """ + + _client: DatasetServiceClient + + DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT + + annotation_path = staticmethod(DatasetServiceClient.annotation_path) + parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) + annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) + data_item_path = staticmethod(DatasetServiceClient.data_item_path) + parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) + dataset_path = staticmethod(DatasetServiceClient.dataset_path) + parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) + common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) + common_project_path = staticmethod(DatasetServiceClient.common_project_path) + parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) + common_location_path = staticmethod(DatasetServiceClient.common_location_path) + parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an 
instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return DatasetServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = DatasetServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: Union[dataset_service.CreateDatasetRequest, dict] = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A + collection of DataItems and Annotations on them. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: Union[dataset_service.GetDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + name (:class:`str`): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: Union[dataset_service.UpdateDatasetRequest, dict] = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): + Required. The Dataset which replaces + the resource on the server. 
+ + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: Union[dataset_service.ListDatasetsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists Datasets in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_datasets(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + parent (:class:`str`): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_dataset(self, + request: Union[dataset_service.DeleteDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def import_data(self, + request: Union[dataset_service.ImportDataRequest, dict] = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_import_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]`): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs: + request.import_configs.extend(import_configs) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_data(self, + request: Union[dataset_service.ExportDataRequest, dict] = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports data from a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_export_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataConfig`): + Required. The desired output + location. 
+ + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_data_items(self, + request: Union[dataset_service.ListDataItemsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsAsyncPager: + r"""Lists DataItems in a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_data_items(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + parent (:class:`str`): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDataItemsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_items, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataItemsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_annotation_spec(self, + request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + name (:class:`str`): + Required. The name of the AnnotationSpec resource. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AnnotationSpec: + Identifies a concept with which + DataItems may be annotated. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_annotations(self, + request: Union[dataset_service.ListAnnotationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAnnotationsAsyncPager: + r"""Lists the Annotations belonging to a DataItem. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_annotations(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]): + The request object. 
Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + parent (:class:`str`): + Required. The resource name of the DataItem to list + Annotations from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager: + Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListAnnotationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_annotations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAnnotationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py new file mode 100644 index 0000000000..71c40dca60 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -0,0 +1,2210 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # 
type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import DatasetServiceGrpcTransport
+from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
+
+
+class DatasetServiceClientMeta(type):
+    """Metaclass for the DatasetService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[DatasetServiceTransport]]
+    _transport_registry["grpc"] = DatasetServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[DatasetServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
+    """The service that handles the CRUD of Vertex AI Dataset and
+    its child resources.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: + """Returns a fully-qualified annotation string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) + + @staticmethod + def parse_annotation_path(path: str) -> Dict[str,str]: + """Parses a annotation path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: + """Returns a fully-qualified annotation_spec string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + + @staticmethod + def parse_annotation_spec_path(path: str) -> Dict[str,str]: + """Parses a annotation_spec path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: + """Returns a fully-qualified data_item string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) + + @staticmethod + def parse_data_item_path(path: str) -> Dict[str,str]: + """Parses a data_item path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def 
parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DatasetServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DatasetServiceTransport): + # transport is a DatasetServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_dataset(self, + request: Union[dataset_service.CreateDatasetRequest, dict] = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + parent (str): + Required. 
The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A + collection of DataItems and Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.CreateDatasetRequest): + request = dataset_service.CreateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + def get_dataset(self, + request: Union[dataset_service.GetDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + name (str): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.GetDatasetRequest): + request = dataset_service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_dataset(self, + request: Union[dataset_service.UpdateDatasetRequest, dict] = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.UpdateDatasetRequest): + request = dataset_service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_datasets(self, + request: Union[dataset_service.ListDatasetsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists Datasets in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_datasets(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + parent (str): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDatasetsRequest): + request = dataset_service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_dataset(self, + request: Union[dataset_service.DeleteDatasetRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + name (str): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.DeleteDatasetRequest): + request = dataset_service.DeleteDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def import_data(self, + request: Union[dataset_service.ImportDataRequest, dict] = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports data into a Dataset. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_import_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ImportDataRequest): + request = dataset_service.ImportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs is not None: + request.import_configs = import_configs + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def export_data(self, + request: Union[dataset_service.ExportDataRequest, dict] = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports data from a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_export_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): + Required. The desired output + location. + + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ExportDataRequest): + request = dataset_service.ExportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. + return response + + def list_data_items(self, + request: Union[dataset_service.ListDataItemsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsPager: + r"""Lists DataItems in a Dataset. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_data_items(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + parent (str): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDataItemsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDataItemsRequest): + request = dataset_service.ListDataItemsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_items] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataItemsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_annotation_spec(self, + request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + name (str): + Required. The name of the AnnotationSpec resource. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AnnotationSpec: + Identifies a concept with which + DataItems may be annotated with. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.GetAnnotationSpecRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.GetAnnotationSpecRequest): + request = dataset_service.GetAnnotationSpecRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_annotations(self, + request: Union[dataset_service.ListAnnotationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAnnotationsPager: + r"""Lists Annotations belongs to a dataitem + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_annotations(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]): + The request object. Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + parent (str): + Required. The resource name of the DataItem to list + Annotations from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager: + Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListAnnotationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListAnnotationsRequest): + request = dataset_service.ListAnnotationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_annotations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAnnotationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py new file mode 100644 index 0000000000..dd9dbb7901 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDatasetsResponse], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDataItemsResponse], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[data_item.DataItem]: + for page in self.pages: + yield from page.data_items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsAsyncPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+    """
+    def __init__(self,
+            method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]],
+            request: dataset_service.ListDataItemsRequest,
+            response: dataset_service.ListDataItemsResponse,
+            *,
+            metadata: Sequence[Tuple[str, str]] = ()):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+            request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest):
+                The initial request object.
+            response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse):
+                The initial response object.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method
+        self._request = dataset_service.ListDataItemsRequest(request)
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._response, name)
+
+    @property
+    async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]:
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = await self._method(self._request, metadata=self._metadata)
+            yield self._response
+    def __aiter__(self) -> AsyncIterator[data_item.DataItem]:
+        async def async_generator():
+            async for page in self.pages:
+                for response in page.data_items:
+                    yield response
+
+        return async_generator()
+
+    def __repr__(self) -> str:
+        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
+
+
+class ListAnnotationsPager:
+    """A pager for iterating through ``list_annotations`` requests.
+
+    This class thinly wraps an initial
+    :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and
+    provides an ``__iter__`` method to iterate through its
+    ``annotations`` field.
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListAnnotationsResponse], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+        """
+        self._method = method
+        self._request = dataset_service.ListAnnotationsRequest(request)
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._response, name)
+
+    @property
+    def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]:
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = self._method(self._request, metadata=self._metadata)
+            yield self._response
+
+    def __iter__(self) -> Iterator[annotation.Annotation]:
+        for page in self.pages:
+            yield from page.annotations
+
+    def __repr__(self) -> str:
+        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
+
+
+class ListAnnotationsAsyncPager:
+    """A pager for iterating through ``list_annotations`` requests.
+
+    This class thinly wraps an initial
+    :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and
+    provides an ``__aiter__`` method to iterate through its
+    ``annotations`` field.
+
+    If there are more pages, the ``__aiter__`` method will make additional
+    ``ListAnnotations`` requests and continue to iterate
+    through the ``annotations`` field on the
+    corresponding responses.
+
+    All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse`
+    attributes are available on the pager. If multiple requests are made, only
+    the most recent response is retained, and thus used for attribute lookup.
+    """
+    def __init__(self,
+            method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]],
+            request: dataset_service.ListAnnotationsRequest,
+            response: dataset_service.ListAnnotationsResponse,
+            *,
+            metadata: Sequence[Tuple[str, str]] = ()):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+ request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[annotation.Annotation]: + async def async_generator(): + async for page in self.pages: + for response in page.annotations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py new file mode 100644 index 0000000000..c7d3ef664d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DatasetServiceTransport +from .grpc import DatasetServiceGrpcTransport +from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] +_transport_registry['grpc'] = DatasetServiceGrpcTransport +_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport + +__all__ = ( + 'DatasetServiceTransport', + 'DatasetServiceGrpcTransport', + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py new file mode 100644 index 0000000000..613a237091 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class DatasetServiceTransport(abc.ABC): + """Abstract transport class for DatasetService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> 
None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, + default_timeout=5.0, + client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, + default_timeout=5.0, + client_info=client_info, + ), + self.list_data_items: gapic_v1.method.wrap_method( + self.list_data_items, + default_timeout=5.0, + client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_timeout=5.0, + client_info=client_info, + ), + self.list_annotations: gapic_v1.method.wrap_method( + self.list_annotations, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Union[ + dataset.Dataset, + Awaitable[dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Union[ + dataset_service.ListDatasetsResponse, + Awaitable[dataset_service.ListDatasetsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Union[ + dataset_service.ListDataItemsResponse, + Awaitable[dataset_service.ListDataItemsResponse] + ]]: + raise NotImplementedError() + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Union[ + annotation_spec.AnnotationSpec, + 
Awaitable[annotation_spec.AnnotationSpec] + ]]: + raise NotImplementedError() + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + Union[ + dataset_service.ListAnnotationsResponse, + Awaitable[dataset_service.ListAnnotationsResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + 
[locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'DatasetServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py new file mode 100644 index 0000000000..32ef2da91d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py @@ -0,0 +1,728 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO + + +class DatasetServiceGrpcTransport(DatasetServiceTransport): + """gRPC backend transport for DatasetService. + + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. 
+ + Returns: + Callable[[~.CreateDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + dataset_service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + dataset_service.ListDataItemsResponse]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + ~.ListDataItemsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an AnnotationSpec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec', + request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + dataset_service.ListAnnotationsResponse]: + r"""Return a callable for the list annotations method over gRPC. + + Lists Annotations belongs to a dataitem + + Returns: + Callable[[~.ListAnnotationsRequest], + ~.ListAnnotationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_annotations' not in self._stubs: + self._stubs['list_annotations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations', + request_serializer=dataset_service.ListAnnotationsRequest.serialize, + response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, + ) + return self._stubs['list_annotations'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:  # fixed: was "delete_operation" (stub never cached -> KeyError)
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'DatasetServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..fe3eff48cf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py @@ -0,0 +1,727 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import DatasetServiceGrpcTransport + + +class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport): + """gRPC AsyncIO backend transport for DatasetService. + + The service that handles the CRUD of Vertex AI Dataset and + its child resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Awaitable[dataset_service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Awaitable[dataset_service.ListDataItemsResponse]]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + Awaitable[~.ListDataItemsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Awaitable[annotation_spec.AnnotationSpec]]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an AnnotationSpec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + Awaitable[~.AnnotationSpec]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec', + request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + Awaitable[dataset_service.ListAnnotationsResponse]]: + r"""Return a callable for the list annotations method over gRPC. + + Lists Annotations belongs to a dataitem + + Returns: + Callable[[~.ListAnnotationsRequest], + Awaitable[~.ListAnnotationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_annotations' not in self._stubs: + self._stubs['list_annotations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations', + request_serializer=dataset_service.ListAnnotationsRequest.serialize, + response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, + ) + return self._stubs['list_annotations'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(review): the guard previously checked the "delete_operation"
+        # key (copy-paste bug), so the wait stub was never cached — a fresh
+        # stub was built on every access once delete_operation existed.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py new file mode 100644 index 0000000000..3d4508fc4a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import EndpointServiceClient +from .async_client import EndpointServiceAsyncClient + +__all__ = ( + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py new file mode 100644 index 0000000000..788d0c9363 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -0,0 +1,1729 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport +from .client import EndpointServiceClient + + +class EndpointServiceAsyncClient: + """A service for managing Vertex AI's Endpoints.""" + + _client: EndpointServiceClient + + DEFAULT_ENDPOINT = 
EndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) + model_path = staticmethod(EndpointServiceClient.model_path) + parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.model_deployment_monitoring_job_path) + parse_model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.parse_model_deployment_monitoring_job_path) + network_path = staticmethod(EndpointServiceClient.network_path) + parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) + common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(EndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(EndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            EndpointServiceAsyncClient: The constructed client.
+        """
+        return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            EndpointServiceAsyncClient: The constructed client.
+        """
+        return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return EndpointServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = EndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_endpoint(self, + request: Union[endpoint_service.CreateEndpointRequest, dict] = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint_id (:class:`str`): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. 
+ + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint, endpoint_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.CreateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_endpoint(self, + request: Union[endpoint_service.GetEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + name (:class:`str`): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.GetEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_endpoints(self, + request: Union[endpoint_service.ListEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsAsyncPager: + r"""Lists Endpoints in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_endpoints(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]): + The request object. Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.ListEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_endpoint(self, + request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UpdateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_endpoint(self, + request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + name (:class:`str`): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeleteEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def deploy_model(self, + request: Union[endpoint_service.DeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model: gca_endpoint.DeployedModel = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_deploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]): + The request object. Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource into which + to deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`): + Required. The DeployedModel to be created within the + Endpoint. Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + This corresponds to the ``deployed_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ traffic_split (:class:`Mapping[str, int]`): + A map from a DeployedModel's ID to the percentage of + this Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the + just being deployed Model, a "0" should be used, and the + actual ID of the new DeployedModel will be filled in its + place by this method. The traffic percentage values must + add up to 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` + Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, deployed_model, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model is not None: + request.deployed_model = deployed_model + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.DeployModelResponse, + metadata_type=endpoint_service.DeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model(self, + request: Union[endpoint_service.UndeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model_id: str = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_undeploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]): + The request object. Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource from which + to undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + Required. The ID of the DeployedModel + to be undeployed from the Endpoint. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (:class:`Mapping[str, int]`): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is + being undeployed from the Endpoint, the + [Endpoint.traffic_split] will always end up empty when + this call returns. A DeployedModel will be successfully + undeployed only if it doesn't have any traffic assigned + to it when this method executes, or if this field + unassigns any traffic to it. 
+ + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py new file mode 100644 index 0000000000..59f5a26228 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -0,0 +1,1948 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import EndpointServiceGrpcTransport +from .transports.grpc_asyncio 
import EndpointServiceGrpcAsyncIOTransport + + + class EndpointServiceClientMeta(type): + """Metaclass for the EndpointService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] + _transport_registry["grpc"] = EndpointServiceGrpcTransport + _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[EndpointServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + + class EndpointServiceClient(metaclass=EndpointServiceClientMeta): + """A service for managing Vertex AI's Endpoints.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses an endpoint path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: + """Returns a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: + """Parses a model_deployment_monitoring_job path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/modelDeploymentMonitoringJobs/(?P<model_deployment_monitoring_job>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> str: + """Returns a fully-qualified network string.""" + return
"projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse an organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m
else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, EndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, EndpointServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, EndpointServiceTransport): + # transport is a EndpointServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_endpoint(self, + request: Union[endpoint_service.CreateEndpointRequest, dict] = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + parent (str): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint, endpoint_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.CreateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.CreateEndpointRequest): + request = endpoint_service.CreateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_endpoint(self, + request: Union[endpoint_service.GetEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.GetEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.GetEndpointRequest): + request = endpoint_service.GetEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_endpoints(self, + request: Union[endpoint_service.ListEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsPager: + r"""Lists Endpoints in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_endpoints(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]): + The request object. Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.ListEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.ListEndpointsRequest): + request = endpoint_service.ListEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_endpoint(self, + request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = client.update_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.UpdateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.UpdateEndpointRequest): + request = endpoint_service.UpdateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_endpoint(self, + request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Endpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + name (str): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.DeleteEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.DeleteEndpointRequest): + request = endpoint_service.DeleteEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def deploy_model(self, + request: Union[endpoint_service.DeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model: gca_endpoint.DeployedModel = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_deploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]): + The request object. Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + endpoint (str): + Required. The name of the Endpoint resource into which + to deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. 
Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + This corresponds to the ``deployed_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (Mapping[str, int]): + A map from a DeployedModel's ID to the percentage of + this Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the + just being deployed Model, a "0" should be used, and the + actual ID of the new DeployedModel will be filled in its + place by this method. The traffic percentage values must + add up to 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` + Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.DeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.DeployModelRequest): + request = endpoint_service.DeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model is not None: + request.deployed_model = deployed_model + if traffic_split is not None: + request.traffic_split = traffic_split + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + endpoint_service.DeployModelResponse, + metadata_type=endpoint_service.DeployModelOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def undeploy_model(self, + request: Union[endpoint_service.UndeployModelRequest, dict] = None, + *, + endpoint: str = None, + deployed_model_id: str = None, + traffic_split: Mapping[str, int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_undeploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]): + The request object. Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + endpoint (str): + Required. The name of the Endpoint resource from which + to undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + Required. The ID of the DeployedModel + to be undeployed from the Endpoint. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ traffic_split (Mapping[str, int]): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is + being undeployed from the Endpoint, the + [Endpoint.traffic_split] will always end up empty when + this call returns. A DeployedModel will be successfully + undeployed only if it doesn't have any traffic assigned + to it when this method executes, or if this field + unassigns any traffic to it. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.UndeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, endpoint_service.UndeployModelRequest): + request = endpoint_service.UndeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if traffic_split is not None: + request.traffic_split = traffic_split + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example**
+ ::
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+ **YAML Example**
+ ::
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._transport.set_iam_policy,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource", request.resource),)),
+ )
+
+ # Send the request.
+ response = rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example**
+ ::
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+ **YAML Example**
+ ::
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._transport.get_iam_policy,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource", request.resource),)),
+ )
+
+ # Send the request.
+ response = rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py new file mode 100644 index 0000000000..8a991cc76f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service + + +class ListEndpointsPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``endpoints`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., endpoint_service.ListEndpointsResponse], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[endpoint.Endpoint]: + for page in self.pages: + yield from page.endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEndpointsAsyncPager: + """A pager for iterating through ``list_endpoints`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..e1f410c28c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import EndpointServiceTransport +from .grpc import EndpointServiceGrpcTransport +from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] +_transport_registry['grpc'] = EndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'EndpointServiceTransport', + 'EndpointServiceGrpcTransport', + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py new file mode 100644 index 0000000000..46982a8d84 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class EndpointServiceTransport(abc.ABC): + """Abstract transport class for EndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_endpoint: gapic_v1.method.wrap_method( + self.create_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.get_endpoint: gapic_v1.method.wrap_method( + self.get_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.list_endpoints: gapic_v1.method.wrap_method( + self.list_endpoints, + default_timeout=5.0, + client_info=client_info, + ), + self.update_endpoint: gapic_v1.method.wrap_method( + self.update_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_endpoint: gapic_v1.method.wrap_method( + self.delete_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Union[ + endpoint.Endpoint, + Awaitable[endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Union[ + endpoint_service.ListEndpointsResponse, + Awaitable[endpoint_service.ListEndpointsResponse] + ]]: + raise NotImplementedError() + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Union[ + gca_endpoint.Endpoint, + Awaitable[gca_endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'EndpointServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..2ed50e19aa --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class EndpointServiceGrpcTransport(EndpointServiceTransport): + """gRPC backend transport for EndpointService. 
+ + A service for managing Vertex AI's Endpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + endpoint.Endpoint]: + r"""Return a callable for the get endpoint method over gRPC. 
+ + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + endpoint_service.ListEndpointsResponse]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + ~.ListEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + gca_endpoint.Endpoint]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. 
+ + Returns: + Callable[[~.UpdateEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. 
+ + Returns: + Callable[[~.DeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'EndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..c9a2623588 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials   # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc                        # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import endpoint
+from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1beta1.types import endpoint_service
+from google.cloud.location import locations_pb2 # type: ignore
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+# NOTE: duplicate `operations_pb2` import removed; the single import below suffices.
+from google.longrunning import operations_pb2  # type: ignore
+from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import EndpointServiceGrpcTransport
+
+
+class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport):
+    """gRPC AsyncIO backend transport for EndpointService.
+
+    A service for managing Vertex AI's Endpoints.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Awaitable[endpoint.Endpoint]]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Awaitable[endpoint_service.ListEndpointsResponse]]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + Awaitable[~.ListEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Awaitable[gca_endpoint.Endpoint]]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py new file mode 100644 index 0000000000..ef931462a5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import FeaturestoreOnlineServingServiceClient +from .async_client import FeaturestoreOnlineServingServiceAsyncClient + +__all__ = ( + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py new file mode 100644 index 0000000000..e9fe025d85 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -0,0 +1,1058 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Mapping, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials                 # type: ignore
+from google.oauth2 import service_account                              # type: ignore
+
+# Compatibility shim: on older google-api-core releases gapic_v1.method has no
+# _MethodDefault attribute, so fall back to a plain `object` sentinel type.
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
+from google.cloud.location import locations_pb2 # type: ignore
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
+from google.longrunning import operations_pb2
+from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport
+from .client import FeaturestoreOnlineServingServiceClient
+
+
+class FeaturestoreOnlineServingServiceAsyncClient:
+    """A service for serving online feature values."""
+
+    # The async client is a thin facade: every call below delegates to this
+    # wrapped synchronous client (construction, path helpers, transport).
+    _client: FeaturestoreOnlineServingServiceClient
+
+    DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT
+    DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT
+
+    # Resource-path helpers re-exported from the sync client so both clients
+    # expose the same static API surface.
+    entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path)
+    parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path)
+    common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path)
+    parse_common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path)
+    common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path)
+    parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path)
+    common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path)
+    parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path)
+    common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path)
+    parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path)
+    common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path)
+    parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path)
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            FeaturestoreOnlineServingServiceAsyncClient: The constructed client.
+        """
+        # Invoke the sync client's underlying function with *this* class as cls
+        # so the returned instance is the async client type.
+        return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            FeaturestoreOnlineServingServiceAsyncClient: The constructed client.
+        """
+        # Same delegation trick as from_service_account_info (see above).
+        return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return FeaturestoreOnlineServingServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> FeaturestoreOnlineServingServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            FeaturestoreOnlineServingServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    # Resolve transport classes through the sync client's (meta)class helper so
+    # both clients share one transport registry.
+    get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the featurestore online serving service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        # All configuration/credential handling lives in the wrapped sync client.
+        self._client = FeaturestoreOnlineServingServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+
+        )
+
+    async def read_feature_values(self,
+            request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None,
+            *,
+            entity_type: str = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> featurestore_online_service.ReadFeatureValuesResponse:
+        r"""Reads Feature values of a specific entity of an
+        EntityType. For reading feature values of multiple
+        entities of an EntityType, please use
+        StreamingReadFeatureValues.
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            async def sample_read_feature_values():
+                # Create a client
+                client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient()
+
+                # Initialize request argument(s)
+                feature_selector = aiplatform_v1beta1.FeatureSelector()
+                feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+                request = aiplatform_v1beta1.ReadFeatureValuesRequest(
+                    entity_type="entity_type_value",
+                    entity_id="entity_id_value",
+                    feature_selector=feature_selector,
+                )
+
+                # Make the request
+                response = await client.read_feature_values(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+            entity_type (:class:`str`):
+                Required. The resource name of the EntityType for the
+                entity being read. Value format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                ``user``.
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = featurestore_online_service.ReadFeatureValuesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.read_feature_values,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.  (x-goog-request-params routing: keyed by entity_type.)
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type", request.entity_type),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def streaming_read_feature_values(self,
+            request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None,
+            *,
+            entity_type: str = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]:
+        r"""Reads Feature values for multiple entities. Depending
+        on their size, data for different entities may be broken
+        up across multiple responses.
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            async def sample_streaming_read_feature_values():
+                # Create a client
+                client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient()
+
+                # Initialize request argument(s)
+                feature_selector = aiplatform_v1beta1.FeatureSelector()
+                feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+                request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest(
+                    entity_type="entity_type_value",
+                    entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
+                    feature_selector=feature_selector,
+                )
+
+                # Make the request
+                stream = await client.streaming_read_feature_values(request=request)
+
+                # Handle the response
+                async for response in stream:
+                    print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]):
+                The request object. Request message for
+                [FeaturestoreOnlineServingService.StreamingReadFeatureValues][].
+            entity_type (:class:`str`):
+                Required. The resource name of the entities' type. Value
+                format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                ``user``.
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = featurestore_online_service.StreamingReadFeatureValuesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.streaming_read_feature_values,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type", request.entity_type),
+            )),
+        )
+
+        # Send the request.
+        # Note: not awaited here — this is a plain `def`; the server-streaming
+        # call object is returned as-is (hence the Awaitable[AsyncIterable]
+        # return annotation).
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    # ------------------------------------------------------------------
+    # Standard google.longrunning.Operations / IAM mixin methods below.
+    # NOTE(review): these wrap with gapic_v1.method.wrap_method while the
+    # flattened RPCs above use gapic_v1.method_async.wrap_method — confirm
+    # this is intended for the async client.
+    # ------------------------------------------------------------------
+
+    async def list_operations(
+        self,
+        request: operations_pb2.ListOperationsRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operations_pb2.ListOperationsResponse:
+        r"""Lists operations that match the specified filter in the request.
+
+        Args:
+            request (:class:`~.operations_pb2.ListOperationsRequest`):
+                The request object. Request message for
+                `ListOperations` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                    if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.operations_pb2.ListOperationsResponse:
+                Response message for ``ListOperations`` method.
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = operations_pb2.ListOperationsRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.list_operations,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def get_operation(
+        self,
+        request: operations_pb2.GetOperationRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operations_pb2.Operation:
+        r"""Gets the latest state of a long-running operation.
+
+        Args:
+            request (:class:`~.operations_pb2.GetOperationRequest`):
+                The request object. Request message for
+                `GetOperation` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                    if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.operations_pb2.Operation:
+                An ``Operation`` object.
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = operations_pb2.GetOperationRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.get_operation,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def delete_operation(
+        self,
+        request: operations_pb2.DeleteOperationRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Deletes a long-running operation.
+
+        This method indicates that the client is no longer interested
+        in the operation result. It does not cancel the operation.
+        If the server doesn't support this method, it returns
+        `google.rpc.Code.UNIMPLEMENTED`.
+
+        Args:
+            request (:class:`~.operations_pb2.DeleteOperationRequest`):
+                The request object. Request message for
+                `DeleteOperation` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                    if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            None
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = operations_pb2.DeleteOperationRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.delete_operation,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.  (No response body for DeleteOperation.)
+        await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+    async def cancel_operation(
+        self,
+        request: operations_pb2.CancelOperationRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Starts asynchronous cancellation on a long-running operation.
+
+        The server makes a best effort to cancel the operation, but success
+        is not guaranteed.  If the server doesn't support this method, it returns
+        `google.rpc.Code.UNIMPLEMENTED`.
+
+        Args:
+            request (:class:`~.operations_pb2.CancelOperationRequest`):
+                The request object. Request message for
+                `CancelOperation` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                    if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            None
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = operations_pb2.CancelOperationRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.cancel_operation,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.  (No response body for CancelOperation.)
+        await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+    async def wait_operation(
+        self,
+        request: operations_pb2.WaitOperationRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operations_pb2.Operation:
+        r"""Waits until the specified long-running operation is done or reaches at most
+        a specified timeout, returning the latest state.
+
+        If the operation is already done, the latest state is immediately returned.
+        If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
+        timeout is used.  If the server does not support this method, it returns
+        `google.rpc.Code.UNIMPLEMENTED`.
+
+        Args:
+            request (:class:`~.operations_pb2.WaitOperationRequest`):
+                The request object. Request message for
+                `WaitOperation` method.
+            retry (google.api_core.retry.Retry): Designation of what errors,
+                    if any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.operations_pb2.Operation:
+                An ``Operation`` object.
+        """
+        # Create or coerce a protobuf request object.
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = operations_pb2.WaitOperationRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.wait_operation,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def set_iam_policy(
+        self,
+        request: iam_policy_pb2.SetIamPolicyRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Sets the IAM access control policy on the specified function.
+
+        Replaces any existing policy.
+
+        Args:
+            request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
+                The request object. Request message for `SetIamPolicy`
+                method.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.policy_pb2.Policy:
+                Defines an Identity and Access Management (IAM) policy.
+                It is used to specify access control policies for Cloud
+                Platform resources.
+                A ``Policy`` is a collection of ``bindings``. A
+                ``binding`` binds one or more ``members`` to a single
+                ``role``. Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions (defined by
+                IAM or configured by users). A ``binding`` can
+                optionally specify a ``condition``, which is a logic
+                expression that further constrains the role binding
+                based on attributes about the request and/or target
+                resource.
+                **JSON Example**
+                ::
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+                **YAML Example**
+                ::
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.  (IAM methods route on "resource", not "name".)
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def get_iam_policy(
+        self,
+        request: iam_policy_pb2.GetIamPolicyRequest = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy_pb2.Policy:
+        r"""Gets the IAM access control policy for a function.
+
+        Returns an empty policy if the function exists and does not have a
+        policy set.
+
+        Args:
+            request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
+                The request object. Request message for `GetIamPolicy`
+                method.
+            retry (google.api_core.retry.Retry): Designation of what errors, if
+                any, should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        Returns:
+            ~.policy_pb2.Policy:
+                Defines an Identity and Access Management (IAM) policy.
+                It is used to specify access control policies for Cloud
+                Platform resources.
+                A ``Policy`` is a collection of ``bindings``. A
+                ``binding`` binds one or more ``members`` to a single
+                ``role``. Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions (defined by
+                IAM or configured by users). A ``binding`` can
+                optionally specify a ``condition``, which is a logic
+                expression that further constrains the role binding
+                based on attributes about the request and/or target
+                resource.
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py new file mode 100644 index 0000000000..b1e6739e67 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -0,0 +1,1252 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +class FeaturestoreOnlineServingServiceClientMeta(type): + """Metaclass for the FeaturestoreOnlineServingService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] + _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport + _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[FeaturestoreOnlineServingServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta): + """A service for serving online feature values.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str,str]: + """Parses a entity_type path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreOnlineServingServiceTransport): + # transport is a FeaturestoreOnlineServingServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def read_feature_values(self, + request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]): + The request object. 
Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType for the + entity being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.ReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest): + request = featurestore_online_service.ReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def streaming_read_feature_values(self, + request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: + r"""Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting + user clicks on a website, an EntityType ID could be + ``user``. + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_online_service.StreamingReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest): + request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py new file mode 100644 index 0000000000..f1db8ba8e1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreOnlineServingServiceTransport +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] +_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport +_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + +__all__ = ( + 'FeaturestoreOnlineServingServiceTransport', + 'FeaturestoreOnlineServingServiceGrpcTransport', + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py new file mode 100644 index 0000000000..a7a31660e2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class FeaturestoreOnlineServingServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreOnlineServingService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.read_feature_values: gapic_v1.method.wrap_method( + self.read_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + self.streaming_read_feature_values: gapic_v1.method.wrap_method( + self.streaming_read_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def read_feature_values(self) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Union[ + featurestore_online_service.ReadFeatureValuesResponse, + Awaitable[featurestore_online_service.ReadFeatureValuesResponse] + ]]: + raise NotImplementedError() + + @property + def streaming_read_feature_values(self) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Union[ + featurestore_online_service.ReadFeatureValuesResponse, + Awaitable[featurestore_online_service.ReadFeatureValuesResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def 
cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'FeaturestoreOnlineServingServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py new file mode 100644 index 
0000000000..01b9e7481f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py @@ -0,0 +1,502 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport): + """gRPC backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def read_feature_values(self) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_feature_values' not in self._stubs: + self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['read_feature_values'] + + @property + def streaming_read_feature_values(self) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + featurestore_online_service.ReadFeatureValuesResponse]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + ~.ReadFeatureValuesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'streaming_read_feature_values' not in self._stubs: + self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['streaming_read_feature_values'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'FeaturestoreOnlineServingServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..795c64b59d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport + + +class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_feature_values(self) -> Callable[ + [featurestore_online_service.ReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Return a callable for the read feature values method over gRPC. + + Reads Feature values of a specific entity of an + EntityType. 
For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Returns: + Callable[[~.ReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_feature_values' not in self._stubs: + self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['read_feature_values'] + + @property + def streaming_read_feature_values(self) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'streaming_read_feature_values' not in self._stubs: + self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['streaming_read_feature_values'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py new file mode 100644 index 0000000000..b8542bb22c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import FeaturestoreServiceClient +from .async_client import FeaturestoreServiceAsyncClient + +__all__ = ( + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py new file mode 100644 index 0000000000..d31de54c2c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -0,0 +1,3378 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import 
field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport +from .client import FeaturestoreServiceClient + + +class FeaturestoreServiceAsyncClient: + """The service that handles CRUD and List for resources for + Featurestore. + """ + + _client: FeaturestoreServiceClient + + DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) + parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) + feature_path = staticmethod(FeaturestoreServiceClient.feature_path) + parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) + featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) + parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) + common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) + common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) + parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) + common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) + parse_common_location_path = 
staticmethod(FeaturestoreServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return FeaturestoreServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.FeaturestoreServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_featurestore(self, + request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Featurestore in a given project and + location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + parent (:class:`str`): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore_id (:class:`str`): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore, featurestore_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_featurestore(self, + request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + name (:class:`str`): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Featurestore: + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. 
The + Featurestore is a top-level container + for your features and their values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_featurestores(self, + request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresAsyncPager: + r"""Lists Featurestores in a given project and location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_featurestores(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + parent (:class:`str`): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_featurestores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturestoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_featurestore(self, + request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Featurestore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore.name", request.featurestore.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_featurestore(self, + request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + name (:class:`str`): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ force (:class:`bool`): + If set to true, any EntityTypes and + Features for this Featurestore will also + be deleted. (Otherwise, the request will + only work if the Featurestore has no + EntityTypes.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_entity_type(self, + request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new EntityType in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + parent (:class:`str`): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type_id (:class:`str`): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type, entity_type_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_entity_type(self, + request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + name (:class:`str`): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_entity_types(self, + request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesAsyncPager: + r"""Lists EntityTypes in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_entity_types(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (:class:`str`): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_entity_types, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEntityTypesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_entity_type(self, + request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = await client.update_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type.name", request.entity_type.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_entity_type(self, + request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteEntityTypes][]. + name (:class:`str`): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (:class:`bool`): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_feature(self, + request: Union[featurestore_service.CreateFeatureRequest, dict] = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + feature_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Feature in a given EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature_id (:class:`str`): + Required. 
The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature, feature_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + if feature_id is not None: + request.feature_id = feature_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_create_features(self, + request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a batch of Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_create_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]`): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_feature(self, + request: Union[featurestore_service.GetFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. + name (:class:`str`): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. 
For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_features(self, + request: Union[featurestore_service.ListFeaturesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesAsyncPager: + r"""Lists Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + parent (:class:`str`): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_feature(self, + request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = await client.update_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("feature.name", request.feature.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_feature(self, + request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Feature. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + name (:class:`str`): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def import_feature_values(self, + request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_import_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def batch_read_feature_values(self, + request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, + *, + featurestore: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]): + The request object. 
Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (:class:`str`): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_feature_values(self, + request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports Feature values from all the entities of a + target EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_export_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def search_features(self, + request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, + *, + location: str = None, + query: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesAsyncPager: + r"""Searches Features matching a query in a given + project. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_search_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (:class:`str`): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. 
Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. 
+ - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location, query]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if location is not None: + request.location = location + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py new file mode 100644 index 0000000000..67f0f149b4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -0,0 +1,3590 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # 
class FeaturestoreServiceClientMeta(type):
    """Metaclass for the FeaturestoreService client.

    Provides class-level helpers for building and retrieving support
    objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[FeaturestoreServiceTransport]]
    _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: str = None,
        ) -> Type[FeaturestoreServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # An explicit label selects that transport directly; a KeyError
        # here means the caller asked for an unregistered transport.
        if label:
            return cls._transport_registry[label]

        # No label given: fall back to the first registered transport
        # (insertion order makes that "grpc").
        return next(iter(cls._transport_registry.values()))
+ Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str,str]: + """Parses a entity_type path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def feature_path(project: str,location: str,featurestore: str,entity_type: str,feature: str,) -> str: + """Returns a fully-qualified feature string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) + + @staticmethod + def parse_feature_path(path: str) -> Dict[str,str]: + """Parses a feature path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def featurestore_path(project: str,location: str,featurestore: str,) -> str: + """Returns a fully-qualified featurestore string.""" + return 
"projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) + + @staticmethod + def parse_featurestore_path(path: str) -> Dict[str,str]: + """Parses a featurestore path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component 
segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreServiceTransport): + # transport is a FeaturestoreServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_featurestore(self, + request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Featurestore in a given project and + location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + parent (str): + Required. The resource name of the Location to create + Featurestores. 
Format: + ``projects/{project}/locations/{location}'`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore_id (str): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
        has_flattened_params = any([parent, featurestore, featurestore_id])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.CreateFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.CreateFeaturestoreRequest):
            request = featurestore_service.CreateFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if featurestore is not None:
                request.featurestore = featurestore
            if featurestore_id is not None:
                request.featurestore_id = featurestore_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_featurestore.Featurestore,
            metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata,
        )

        # Done; return the response.
        return response

    def get_featurestore(self,
            request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> featurestore.Featurestore:
        r"""Gets details of a single Featurestore.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_get_featurestore():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.GetFeaturestoreRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_featurestore(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore].
            name (str):
                Required. The name of the
                Featurestore resource.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.Featurestore:
                Vertex AI Feature Store provides a
                centralized repository for organizing,
                storing, and serving ML features. The
                Featurestore is a top-level container
                for your features and their values.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.GetFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.GetFeaturestoreRequest):
            request = featurestore_service.GetFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_featurestores(self,
            request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListFeaturestoresPager:
        r"""Lists Featurestores in a given project and location.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_list_featurestores():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.ListFeaturestoresRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_featurestores(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
            parent (str):
                Required. The resource name of the Location to list
                Featurestores. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager:
                Response message for
                [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.ListFeaturestoresRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.ListFeaturestoresRequest):
            request = featurestore_service.ListFeaturestoresRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_featurestores]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListFeaturestoresPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def update_featurestore(self,
            request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None,
            *,
            featurestore: gca_featurestore.Featurestore = None,
            update_mask: field_mask_pb2.FieldMask = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Updates the parameters of a single Featurestore.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_update_featurestore():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.UpdateFeaturestoreRequest(
                )

                # Make the request
                operation = client.update_featurestore(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore].
            featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore):
                Required. The Featurestore's ``name`` field is used to
                identify the Featurestore to be updated. Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}``

                This corresponds to the ``featurestore`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                Field mask is used to specify the fields to be
                overwritten in the Featurestore resource by the update.
                The fields specified in the update_mask are relative to
                the resource, not the full request. A field will be
                overwritten if it is in the mask. If the user does not
                provide a mask then only the non-empty fields present in
                the request will be overwritten. Set the update_mask to
                ``*`` to override all fields.

                Updatable fields:

                -  ``labels``
                -  ``online_serving_config.fixed_node_count``
                -  ``online_serving_config.scaling``

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing,
                storing, and serving ML features. The Featurestore is
                a top-level container for your features and their
                values.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([featurestore, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.UpdateFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest):
            request = featurestore_service.UpdateFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if featurestore is not None:
                request.featurestore = featurestore
            if update_mask is not None:
                request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("featurestore.name", request.featurestore.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_featurestore.Featurestore,
            metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata,
        )

        # Done; return the response.
        return response

    def delete_featurestore(self,
            request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None,
            *,
            name: str = None,
            force: bool = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Deletes a single Featurestore. The Featurestore must not contain
        any EntityTypes or ``force`` must be set to true for the request
        to succeed.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_delete_featurestore():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.DeleteFeaturestoreRequest(
                    name="name_value",
                )

                # Make the request
                operation = client.delete_featurestore(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore].
            name (str):
                Required. The name of the Featurestore to be deleted.
                Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            force (bool):
                If set to true, any EntityTypes and
                Features for this Featurestore will also
                be deleted. (Otherwise, the request will
                only work if the Featurestore has no
                EntityTypes.)

                This corresponds to the ``force`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:

                service Foo {
                    rpc Bar(google.protobuf.Empty) returns
                    (google.protobuf.Empty);

                }

                The JSON representation for Empty is empty JSON
                object {}.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name, force])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.DeleteFeaturestoreRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest):
            request = featurestore_service.DeleteFeaturestoreRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
            if force is not None:
                request.force = force

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete_featurestore]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response

    def create_entity_type(self,
            request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None,
            *,
            parent: str = None,
            entity_type: gca_entity_type.EntityType = None,
            entity_type_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gac_operation.Operation:
        r"""Creates a new EntityType in a given Featurestore.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_create_entity_type():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.CreateEntityTypeRequest(
                    parent="parent_value",
                    entity_type_id="entity_type_id_value",
                )

                # Make the request
                operation = client.create_entity_type(request=request)

                print("Waiting for operation to complete...")

                response = operation.result()

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType].
            parent (str):
                Required. The resource name of the Featurestore to
                create EntityTypes. Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            entity_type (google.cloud.aiplatform_v1beta1.types.EntityType):
                The EntityType to create.
                This corresponds to the ``entity_type`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            entity_type_id (str):
                Required. The ID to use for the EntityType, which will
                become the final component of the EntityType's resource
                name.

                This value may be up to 60 characters, and valid
                characters are ``[a-z0-9_]``. The first character cannot
                be a number.

                The value must be unique within a featurestore.

                This corresponds to the ``entity_type_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and
                have stored information about. For example, driver is
                an entity type, and driver0 is an instance of an
                entity type driver.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, entity_type, entity_type_id])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.CreateEntityTypeRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, featurestore_service.CreateEntityTypeRequest):
            request = featurestore_service.CreateEntityTypeRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if entity_type is not None:
                request.entity_type = entity_type
            if entity_type_id is not None:
                request.entity_type_id = entity_type_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_entity_type]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = gac_operation.from_gapic(
            response,
            self._transport.operations_client,
            gca_entity_type.EntityType,
            metadata_type=featurestore_service.CreateEntityTypeOperationMetadata,
        )

        # Done; return the response.
        return response

    def get_entity_type(self,
            request: Union[featurestore_service.GetEntityTypeRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> entity_type.EntityType:
        r"""Gets details of a single EntityType.

        .. code-block:: python

            from google.cloud import aiplatform_v1beta1

            def sample_get_entity_type():
                # Create a client
                client = aiplatform_v1beta1.FeaturestoreServiceClient()

                # Initialize request argument(s)
                request = aiplatform_v1beta1.GetEntityTypeRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_entity_type(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]):
                The request object. Request message for
                [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType].
            name (str):
                Required. The name of the EntityType resource. Format:
                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1beta1.types.EntityType:
                An entity type is a type of object in
                a system that needs to be modeled and
                have stored information about. For
                example, driver is an entity type, and
                driver0 is an instance of an entity type
                driver.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a featurestore_service.GetEntityTypeRequest.
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetEntityTypeRequest): + request = featurestore_service.GetEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_entity_types(self, + request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesPager: + r"""Lists EntityTypes in a given Featurestore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_entity_types(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (str): + Required. 
The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListEntityTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListEntityTypesRequest): + request = featurestore_service.ListEntityTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_entity_types] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEntityTypesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_entity_type(self, + request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = client.update_entity_type(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): + request = featurestore_service.UpdateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type.name", request.entity_type.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_entity_type(self, + request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, + *, + name: str = None, + force: bool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType]. + name (str): + Required. The name of the EntityType to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + force (bool): + If set to true, any Features for this + EntityType will also be deleted. + (Otherwise, the request will only work + if the EntityType has no Features.) + + This corresponds to the ``force`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, force]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): + request = featurestore_service.DeleteEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if force is not None: + request.force = force + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_feature(self, + request: Union[featurestore_service.CreateFeatureRequest, dict] = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + feature_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Feature in a given EntityType. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (str): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature_id (str): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. 
+ + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature, feature_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeatureRequest): + request = featurestore_service.CreateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + if feature_id is not None: + request.feature_id = feature_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_create_features(self, + request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a batch of Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_batch_create_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (str): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchCreateFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): + request = featurestore_service.BatchCreateFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + def get_feature(self, + request: Union[featurestore_service.GetFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = client.get_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetFeatureRequest): + request = featurestore_service.GetFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_features(self, + request: Union[featurestore_service.ListFeaturesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesPager: + r"""Lists Features in a given EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + parent (str): + Required. The resource name of the EntityType to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturesRequest): + request = featurestore_service.ListFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListFeaturesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_feature(self, + request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = client.update_feature(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateFeatureRequest): + request = featurestore_service.UpdateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("feature.name", request.feature.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_feature(self, + request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Feature. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + name (str): + Required. The name of the Features to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeatureRequest): + request = featurestore_service.DeleteFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def import_feature_values(self, + request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_import_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ImportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): + request = featurestore_service.ImportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_read_feature_values(self, + request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, + *, + featurestore: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # 
Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (str): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchReadFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): + request = featurestore_service.BatchReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def export_feature_values(self, + request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, + *, + entity_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports Feature values from all the entities of a + target EntityType. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_export_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ExportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): + request = featurestore_service.ExportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def search_features(self, + request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, + *, + location: str = None, + query: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesPager: + r"""Searches Features matching a query in a given + project. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_search_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. + Field-restricted queries and filters can be combined + using ``AND`` to form a conjunction. + + A field query is in the form FIELD:QUERY. This + implicitly checks if QUERY exists as a substring within + Feature's FIELD. 
The QUERY and the FIELD are converted + to a sequence of words (i.e. tokens) for comparison. + This is done by: + + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches + characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double + quotation marks ("). With phrases, the order of the + words is important. Words in the phrase must be matching + in order and consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature + with ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches + a Feature with ID containing the substring ``foo`` + and description containing the substring ``bar``. + + Besides field queries, the following exact-match filters + are supported. The exact-match filters do not support + wildcards. Unlike field-restricted queries, exact-match + filters are case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as + key presence. + - ``featurestore_id``: Supports = comparisons. 
+ + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. + + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location, query]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.SearchFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.SearchFeaturesRequest): + request = featurestore_service.SearchFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if location is not None: + request.location = location + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchFeaturesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+                **JSON Example**
+                ::
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+                **YAML Example**
+                ::
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._transport.set_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+                **JSON Example**
+                ::
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+                **YAML Example**
+                ::
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._transport.get_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py new file mode 100644 index 0000000000..65351ff6cf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service + + +class ListFeaturestoresPager: + """A pager for iterating through ``list_featurestores`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., featurestore_service.ListFeaturestoresResponse], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[featurestore.Featurestore]: + for page in self.pages: + yield from page.featurestores + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturestoresAsyncPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]: + async def async_generator(): + async for page in self.pages: + for response in page.featurestores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEntityTypesPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., featurestore_service.ListEntityTypesResponse], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[entity_type.EntityType]: + for page in self.pages: + yield from page.entity_types + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEntityTypesAsyncPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[entity_type.EntityType]: + async def async_generator(): + async for page in self.pages: + for response in page.entity_types: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturesPager: + """A pager for iterating through ``list_features`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., featurestore_service.ListFeaturesResponse], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturesAsyncPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. 
class SearchFeaturesPager:
    """A pager for iterating through ``search_features`` requests.

    Thinly wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse`
    and provides ``__iter__`` over its ``features`` field. When more pages
    exist, additional ``SearchFeatures`` requests are issued transparently
    and their ``features`` are iterated as well.

    Attribute access falls through to the most recently fetched response,
    so all the usual ``SearchFeaturesResponse`` attributes are available
    on the pager itself.
    """

    def __init__(self,
            method: Callable[..., featurestore_service.SearchFeaturesResponse],
            request: featurestore_service.SearchFeaturesRequest,
            response: featurestore_service.SearchFeaturesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns a mutable copy of the request whose
        # page_token it can advance.
        self._request = featurestore_service.SearchFeaturesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]:
        """Yield each page of results, fetching more pages as needed."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[feature.Feature]:
        for page in self.pages:
            for item in page.features:
                yield item

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'


class SearchFeaturesAsyncPager:
    """A pager for iterating through ``search_features`` requests.

    Thinly wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse`
    and provides ``__aiter__`` over its ``features`` field. When more pages
    exist, additional ``SearchFeatures`` requests are awaited transparently
    and their ``features`` are iterated as well.

    Attribute access falls through to the most recently fetched response,
    so all the usual ``SearchFeaturesResponse`` attributes are available
    on the pager itself.
    """

    def __init__(self,
            method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]],
            request: featurestore_service.SearchFeaturesRequest,
            response: featurestore_service.SearchFeaturesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap so the pager owns a mutable copy of the request whose
        # page_token it can advance.
        self._request = featurestore_service.SearchFeaturesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]:
        """Asynchronously yield each page, awaiting further pages as needed."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __aiter__(self) -> AsyncIterator[feature.Feature]:
        async def _flatten():
            async for page in self.pages:
                for item in page.features:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import FeaturestoreServiceTransport
from .grpc import FeaturestoreServiceGrpcTransport
from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport


# Registry mapping transport names to their implementing classes.
_transport_registry: Dict[str, Type[FeaturestoreServiceTransport]] = OrderedDict(
    (
        ('grpc', FeaturestoreServiceGrpcTransport),
        ('grpc_asyncio', FeaturestoreServiceGrpcAsyncIOTransport),
    )
)

__all__ = (
    'FeaturestoreServiceTransport',
    'FeaturestoreServiceGrpcTransport',
    'FeaturestoreServiceGrpcAsyncIOTransport',
)
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import entity_type
from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1beta1.types import feature
from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
from google.cloud.aiplatform_v1beta1.types import featurestore
from google.cloud.aiplatform_v1beta1.types import featurestore_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# NOTE: the original imported operations_pb2 twice; keep a single import.
from google.longrunning import operations_pb2  # type: ignore

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Distribution metadata unavailable (e.g. running from source):
    # fall back to an unversioned client info.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


# Synchronous transports return the bare response; asyncio transports
# return an awaitable of it.  This alias keeps the property signatures
# below readable.
_MaybeAwaitableOperation = Union[
    operations_pb2.Operation,
    Awaitable[operations_pb2.Operation],
]


class FeaturestoreServiceTransport(abc.ABC):
    """Abstract transport class for FeaturestoreService.

    Concrete subclasses (gRPC, gRPC asyncio) implement the RPC-handler
    properties; this base class handles credential resolution and
    retry/timeout wrapping shared by all transports.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always
        # try to use self-signed JWT (skips the token exchange round trip).
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials,
                            "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the retry/timeout wrapper for every RPC handler.

        Every RPC shares a flat 5-second default timeout except
        ``export_feature_values``, which has no default timeout.
        """
        rpc_timeouts = (
            (self.create_featurestore, 5.0),
            (self.get_featurestore, 5.0),
            (self.list_featurestores, 5.0),
            (self.update_featurestore, 5.0),
            (self.delete_featurestore, 5.0),
            (self.create_entity_type, 5.0),
            (self.get_entity_type, 5.0),
            (self.list_entity_types, 5.0),
            (self.update_entity_type, 5.0),
            (self.delete_entity_type, 5.0),
            (self.create_feature, 5.0),
            (self.batch_create_features, 5.0),
            (self.get_feature, 5.0),
            (self.list_features, 5.0),
            (self.update_feature, 5.0),
            (self.delete_feature, 5.0),
            (self.import_feature_values, 5.0),
            (self.batch_read_feature_values, 5.0),
            (self.export_feature_values, None),
            (self.search_features, 5.0),
        )
        self._wrapped_methods = {
            handler: gapic_v1.method.wrap_method(
                handler,
                default_timeout=timeout,
                client_info=client_info,
            )
            for handler, timeout in rpc_timeouts
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_featurestore(self) -> Callable[
            [featurestore_service.CreateFeaturestoreRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def get_featurestore(self) -> Callable[
            [featurestore_service.GetFeaturestoreRequest],
            Union[featurestore.Featurestore,
                  Awaitable[featurestore.Featurestore]]]:
        raise NotImplementedError()

    @property
    def list_featurestores(self) -> Callable[
            [featurestore_service.ListFeaturestoresRequest],
            Union[featurestore_service.ListFeaturestoresResponse,
                  Awaitable[featurestore_service.ListFeaturestoresResponse]]]:
        raise NotImplementedError()

    @property
    def update_featurestore(self) -> Callable[
            [featurestore_service.UpdateFeaturestoreRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def delete_featurestore(self) -> Callable[
            [featurestore_service.DeleteFeaturestoreRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def create_entity_type(self) -> Callable[
            [featurestore_service.CreateEntityTypeRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def get_entity_type(self) -> Callable[
            [featurestore_service.GetEntityTypeRequest],
            Union[entity_type.EntityType,
                  Awaitable[entity_type.EntityType]]]:
        raise NotImplementedError()

    @property
    def list_entity_types(self) -> Callable[
            [featurestore_service.ListEntityTypesRequest],
            Union[featurestore_service.ListEntityTypesResponse,
                  Awaitable[featurestore_service.ListEntityTypesResponse]]]:
        raise NotImplementedError()

    @property
    def update_entity_type(self) -> Callable[
            [featurestore_service.UpdateEntityTypeRequest],
            Union[gca_entity_type.EntityType,
                  Awaitable[gca_entity_type.EntityType]]]:
        raise NotImplementedError()

    @property
    def delete_entity_type(self) -> Callable[
            [featurestore_service.DeleteEntityTypeRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def create_feature(self) -> Callable[
            [featurestore_service.CreateFeatureRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def batch_create_features(self) -> Callable[
            [featurestore_service.BatchCreateFeaturesRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def get_feature(self) -> Callable[
            [featurestore_service.GetFeatureRequest],
            Union[feature.Feature,
                  Awaitable[feature.Feature]]]:
        raise NotImplementedError()

    @property
    def list_features(self) -> Callable[
            [featurestore_service.ListFeaturesRequest],
            Union[featurestore_service.ListFeaturesResponse,
                  Awaitable[featurestore_service.ListFeaturesResponse]]]:
        raise NotImplementedError()

    @property
    def update_feature(self) -> Callable[
            [featurestore_service.UpdateFeatureRequest],
            Union[gca_feature.Feature,
                  Awaitable[gca_feature.Feature]]]:
        raise NotImplementedError()

    @property
    def delete_feature(self) -> Callable[
            [featurestore_service.DeleteFeatureRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def import_feature_values(self) -> Callable[
            [featurestore_service.ImportFeatureValuesRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def batch_read_feature_values(self) -> Callable[
            [featurestore_service.BatchReadFeatureValuesRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def export_feature_values(self) -> Callable[
            [featurestore_service.ExportFeatureValuesRequest],
            _MaybeAwaitableOperation]:
        raise NotImplementedError()

    @property
    def search_features(self) -> Callable[
            [featurestore_service.SearchFeaturesRequest],
            Union[featurestore_service.SearchFeaturesResponse,
                  Awaitable[featurestore_service.SearchFeaturesResponse]]]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse,
              Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        _MaybeAwaitableOperation,
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        _MaybeAwaitableOperation,
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse,
              Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'FeaturestoreServiceTransport',
)
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.aiplatform_v1beta1.types import entity_type
from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1beta1.types import feature
from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
from google.cloud.aiplatform_v1beta1.types import featurestore
from google.cloud.aiplatform_v1beta1.types import featurestore_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# NOTE: the original imported operations_pb2 twice; keep a single import.
from google.longrunning import operations_pb2  # type: ignore
from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO


class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport):
    """gRPC backend transport for FeaturestoreService.

    The service that handles CRUD and List for resources for
    Featurestore.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'aiplatform.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[grpc.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
            host: str = 'aiplatform.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    def _get_stub(self, name: str, rpc_path: str,
                  request_serializer, response_deserializer) -> Callable:
        """Lazily create and cache the unary-unary stub for ``name``.

        gRPC handles serialization and deserialization, so we just pass
        in the serializer/deserializer functions for each RPC.
        """
        if name not in self._stubs:
            self._stubs[name] = self.grpc_channel.unary_unary(
                rpc_path,
                request_serializer=request_serializer,
                response_deserializer=response_deserializer,
            )
        return self._stubs[name]

    @property
    def create_featurestore(self) -> Callable[
            [featurestore_service.CreateFeaturestoreRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the create featurestore method over gRPC.

        Creates a new Featurestore in a given project and
        location.

        Returns:
            Callable[[~.CreateFeaturestoreRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'create_featurestore',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore',
            featurestore_service.CreateFeaturestoreRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def get_featurestore(self) -> Callable[
            [featurestore_service.GetFeaturestoreRequest],
            featurestore.Featurestore]:
        r"""Return a callable for the get featurestore method over gRPC.

        Gets details of a single Featurestore.

        Returns:
            Callable[[~.GetFeaturestoreRequest],
                    ~.Featurestore]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'get_featurestore',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore',
            featurestore_service.GetFeaturestoreRequest.serialize,
            featurestore.Featurestore.deserialize,
        )

    @property
    def list_featurestores(self) -> Callable[
            [featurestore_service.ListFeaturestoresRequest],
            featurestore_service.ListFeaturestoresResponse]:
        r"""Return a callable for the list featurestores method over gRPC.

        Lists Featurestores in a given project and location.

        Returns:
            Callable[[~.ListFeaturestoresRequest],
                    ~.ListFeaturestoresResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'list_featurestores',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores',
            featurestore_service.ListFeaturestoresRequest.serialize,
            featurestore_service.ListFeaturestoresResponse.deserialize,
        )

    @property
    def update_featurestore(self) -> Callable[
            [featurestore_service.UpdateFeaturestoreRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the update featurestore method over gRPC.

        Updates the parameters of a single Featurestore.

        Returns:
            Callable[[~.UpdateFeaturestoreRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'update_featurestore',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore',
            featurestore_service.UpdateFeaturestoreRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def delete_featurestore(self) -> Callable[
            [featurestore_service.DeleteFeaturestoreRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete featurestore method over gRPC.

        Deletes a single Featurestore. The Featurestore must not contain
        any EntityTypes or ``force`` must be set to true for the request
        to succeed.

        Returns:
            Callable[[~.DeleteFeaturestoreRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'delete_featurestore',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore',
            featurestore_service.DeleteFeaturestoreRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def create_entity_type(self) -> Callable[
            [featurestore_service.CreateEntityTypeRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the create entity type method over gRPC.

        Creates a new EntityType in a given Featurestore.

        Returns:
            Callable[[~.CreateEntityTypeRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'create_entity_type',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType',
            featurestore_service.CreateEntityTypeRequest.serialize,
            operations_pb2.Operation.FromString,
        )

    @property
    def get_entity_type(self) -> Callable[
            [featurestore_service.GetEntityTypeRequest],
            entity_type.EntityType]:
        r"""Return a callable for the get entity type method over gRPC.

        Gets details of a single EntityType.

        Returns:
            Callable[[~.GetEntityTypeRequest],
                    ~.EntityType]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        return self._get_stub(
            'get_entity_type',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType',
            featurestore_service.GetEntityTypeRequest.serialize,
            entity_type.EntityType.deserialize,
        )

    @property
    def list_entity_types(self) -> Callable[
            [featurestore_service.ListEntityTypesRequest],
            featurestore_service.ListEntityTypesResponse]:
        r"""Return a callable for the list entity types method over gRPC.

        Lists EntityTypes in a given Featurestore.

        Returns:
            Callable[[~.ListEntityTypesRequest],
                    ~.ListEntityTypesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # NOTE(review): the stub-registration lines for this property are
        # truncated in the patch hunk; the path/serializers below follow the
        # uniform generated pattern — confirm against the generated source.
        return self._get_stub(
            'list_entity_types',
            '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes',
            featurestore_service.ListEntityTypesRequest.serialize,
            featurestore_service.ListEntityTypesResponse.deserialize,
        )
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + gca_entity_type.EntityType]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + feature.Feature]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + featurestore_service.ListFeaturesResponse]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + ~.ListFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + gca_feature.Feature]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'import_feature_values' not in self._stubs: + self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_feature_values'] + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_read_feature_values' not in self._stubs: + self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_read_feature_values'] + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. 
+ + Returns: + Callable[[~.ExportFeatureValuesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + featurestore_service.SearchFeaturesResponse]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + ~.SearchFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        # BUG FIX: the guard previously checked "delete_operation" (copy-paste
+        # error), so the wait_operation stub was never cached correctly.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'FeaturestoreServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..6c83628270 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py @@ -0,0 +1,1021 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreServiceGrpcTransport + + +class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_featurestore' not in self._stubs: + self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_featurestore'] + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Awaitable[featurestore.Featurestore]]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + Awaitable[~.Featurestore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_featurestore' not in self._stubs: + self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs['get_featurestore'] + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Awaitable[featurestore_service.ListFeaturestoresResponse]]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + Awaitable[~.ListFeaturestoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_featurestores' not in self._stubs: + self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs['list_featurestores'] + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_featurestore' not in self._stubs: + self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_featurestore'] + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. 
+ + Returns: + Callable[[~.DeleteFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_featurestore' not in self._stubs: + self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_featurestore'] + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_entity_type' not in self._stubs: + self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_entity_type'] + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + Awaitable[entity_type.EntityType]]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. 
+ + Returns: + Callable[[~.GetEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_entity_type' not in self._stubs: + self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs['get_entity_type'] + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Awaitable[featurestore_service.ListEntityTypesResponse]]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + Awaitable[~.ListEntityTypesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Awaitable[gca_entity_type.EntityType]]: + r"""Return a callable for the update entity type method over gRPC. 
+ + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + Awaitable[feature.Feature]]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Awaitable[featurestore_service.ListFeaturesResponse]]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + Awaitable[~.ListFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + Awaitable[gca_feature.Feature]]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'import_feature_values' not in self._stubs: + self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_feature_values'] + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_read_feature_values' not in self._stubs: + self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_read_feature_values'] + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export feature values method over gRPC. 
+ + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Awaitable[featurestore_service.SearchFeaturesResponse]]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + Awaitable[~.SearchFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'FeaturestoreServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py new file mode 100644 index 0000000000..3ae6bb8e71 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import IndexEndpointServiceClient +from .async_client import IndexEndpointServiceAsyncClient + +__all__ = ( + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py new file mode 100644 index 0000000000..97dcf618c7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -0,0 +1,1793 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport +from .client import IndexEndpointServiceClient + + +class IndexEndpointServiceAsyncClient: + """A service for managing Vertex AI's IndexEndpoints.""" + + _client: IndexEndpointServiceClient + + DEFAULT_ENDPOINT = 
IndexEndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexEndpointServiceClient.index_path) + parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. 
+ """ + return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return IndexEndpointServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = IndexEndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_index_endpoint(self, + request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. 
An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.CreateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index_endpoint(self, + request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (:class:`str`): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.GetIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_index_endpoints(self, + request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsAsyncPager: + r"""Lists IndexEndpoints in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the IndexEndpoints. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.ListIndexEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_index_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index_endpoint(self, + request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_index_endpoint(self, + request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + name (:class:`str`): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def deploy_index(self, + request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_deploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be + created within the IndexEndpoint. 
+ + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_index(self, + request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_undeploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (:class:`str`): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UndeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def mutate_deployed_index(self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.MutateDeployedIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_deployed_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py new file mode 100644 index 0000000000..d61fcd4103 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -0,0 +1,1996 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import IndexEndpointServiceGrpcTransport +from .transports.grpc_asyncio import 
IndexEndpointServiceGrpcAsyncIOTransport + + +class IndexEndpointServiceClientMeta(type): + """Metaclass for the IndexEndpointService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] + _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport + _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[IndexEndpointServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta): + """A service for managing Vertex AI's IndexEndpoints.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def index_path(project: str,location: str,index: str,) -> str: + """Returns a fully-qualified index string.""" + return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + + @staticmethod + def parse_index_path(path: str) -> Dict[str,str]: + """Parses a index path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: + """Returns a fully-qualified index_endpoint string.""" + return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + + @staticmethod + def parse_index_endpoint_path(path: str) -> Dict[str,str]: + """Parses a index_endpoint path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, IndexEndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexEndpointServiceTransport): + # transport is a IndexEndpointServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_index_endpoint(self, + request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (str): + Required. 
The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.CreateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): + request = index_endpoint_service.CreateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index_endpoint(self, + request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (str): + Required. 
The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.GetIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): + request = index_endpoint_service.GetIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_index_endpoints(self, + request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsPager: + r"""Lists IndexEndpoints in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.ListIndexEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): + request = index_endpoint_service.ListIndexEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListIndexEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_index_endpoint(self, + request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UpdateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_index_endpoint(self, + request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeleteIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def deploy_index(self, + request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_deploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be + created within the IndexEndpoint. + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeployIndexRequest): + request = index_endpoint_service.DeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def undeploy_index(self, + request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_undeploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (str): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UndeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UndeployIndexRequest): + request = index_endpoint_service.UndeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def mutate_deployed_index(self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. 
Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.MutateDeployedIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): + request = index_endpoint_service.MutateDeployedIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py new file mode 100644 index 0000000000..9f0411a11c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service + + +class ListIndexEndpointsPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``index_endpoints`` field. 
class ListIndexEndpointsPager:
    """Synchronous pager over ``list_index_endpoints`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse`
    and exposes ``__iter__`` over its ``index_endpoints`` field. When a
    response carries a ``next_page_token``, iteration transparently issues
    further ``ListIndexEndpoints`` requests and keeps yielding.

    All attributes of the underlying response type are available on the
    pager; if several requests have been made, attribute lookups resolve
    against the most recent response only.
    """
    def __init__(self,
            method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse],
            request: index_endpoint_service.ListIndexEndpointsRequest,
            response: index_endpoint_service.ListIndexEndpointsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a mutable request so page_token can be advanced.
        self._request = index_endpoint_service.ListIndexEndpointsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]:
        # Yield the page already in hand, then keep fetching while the
        # server reports more pages via next_page_token.
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]:
        for page in self.pages:
            for endpoint in page.index_endpoints:
                yield endpoint

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListIndexEndpointsAsyncPager:
    """Asynchronous pager over ``list_index_endpoints`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse`
    and exposes ``__aiter__`` over its ``index_endpoints`` field. When a
    response carries a ``next_page_token``, iteration transparently awaits
    further ``ListIndexEndpoints`` requests and keeps yielding.

    All attributes of the underlying response type are available on the
    pager; if several requests have been made, attribute lookups resolve
    against the most recent response only.
    """
    def __init__(self,
            method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]],
            request: index_endpoint_service.ListIndexEndpointsRequest,
            response: index_endpoint_service.ListIndexEndpointsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a mutable request so page_token can be advanced.
        self._request = index_endpoint_service.ListIndexEndpointsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]:
        # Yield the page already in hand, then keep fetching while the
        # server reports more pages via next_page_token.
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = await self._method(self._request, metadata=self._metadata)

    def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]:
        async def _flatten():
            async for page in self.pages:
                for endpoint in page.index_endpoints:
                    yield endpoint

        return _flatten()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexEndpointServiceTransport +from .grpc import IndexEndpointServiceGrpcTransport +from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] +_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexEndpointServiceTransport', + 'IndexEndpointServiceGrpcTransport', + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py new file mode 100644 index 0000000000..959b7b8ed5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore

# Resolve the client-info used for user-agent reporting; fall back to a
# bare ClientInfo when the distribution is not installed (e.g. in tests).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class IndexEndpointServiceTransport(abc.ABC):
    """Abstract transport class for IndexEndpointService.

    Concrete transports (gRPC, gRPC-asyncio) subclass this and implement
    the RPC properties; this base class only resolves credentials and
    precomputes retry/timeout-wrapped method callables.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods, attaching default timeouts and
        # client-info (for user-agent reporting) to each RPC.
        self._wrapped_methods = {
            self.create_index_endpoint: gapic_v1.method.wrap_method(
                self.create_index_endpoint,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.get_index_endpoint: gapic_v1.method.wrap_method(
                self.get_index_endpoint,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.list_index_endpoints: gapic_v1.method.wrap_method(
                self.list_index_endpoints,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.update_index_endpoint: gapic_v1.method.wrap_method(
                self.update_index_endpoint,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.delete_index_endpoint: gapic_v1.method.wrap_method(
                self.delete_index_endpoint,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.deploy_index: gapic_v1.method.wrap_method(
                self.deploy_index,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.undeploy_index: gapic_v1.method.wrap_method(
                self.undeploy_index,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.mutate_deployed_index: gapic_v1.method.wrap_method(
                self.mutate_deployed_index,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_index_endpoint(self) -> Callable[
            [index_endpoint_service.CreateIndexEndpointRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get_index_endpoint(self) -> Callable[
            [index_endpoint_service.GetIndexEndpointRequest],
            Union[
                index_endpoint.IndexEndpoint,
                Awaitable[index_endpoint.IndexEndpoint]
            ]]:
        raise NotImplementedError()

    @property
    def list_index_endpoints(self) -> Callable[
            [index_endpoint_service.ListIndexEndpointsRequest],
            Union[
                index_endpoint_service.ListIndexEndpointsResponse,
                Awaitable[index_endpoint_service.ListIndexEndpointsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def update_index_endpoint(self) -> Callable[
            [index_endpoint_service.UpdateIndexEndpointRequest],
            Union[
                gca_index_endpoint.IndexEndpoint,
                Awaitable[gca_index_endpoint.IndexEndpoint]
            ]]:
        raise NotImplementedError()

    @property
    def delete_index_endpoint(self) -> Callable[
            [index_endpoint_service.DeleteIndexEndpointRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def deploy_index(self) -> Callable[
            [index_endpoint_service.DeployIndexRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def undeploy_index(self) -> Callable[
            [index_endpoint_service.UndeployIndexRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def mutate_deployed_index(self) -> Callable[
            [index_endpoint_service.MutateDeployedIndexRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'IndexEndpointServiceTransport',
)
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): + """gRPC backend transport for IndexEndpointService. + + A service for managing Vertex AI's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. 
+ + Returns: + Callable[[~.CreateIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + index_endpoint.IndexEndpoint]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + index_endpoint_service.ListIndexEndpointsResponse]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. 
+ + Returns: + Callable[[~.ListIndexEndpointsRequest], + ~.ListIndexEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + gca_index_endpoint.IndexEndpoint]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + @property + def mutate_deployed_index(self) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_deployed_index' not in self._stubs: + self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex', + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['mutate_deployed_index'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'IndexEndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..7afa6681ee --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,678 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexEndpointServiceGrpcTransport + + +class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): + """gRPC AsyncIO backend transport for IndexEndpointService. + + A service for managing Vertex AI's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. 
+ Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. + + Returns: + Callable[[~.CreateIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + Awaitable[index_endpoint.IndexEndpoint]]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. + + Returns: + Callable[[~.ListIndexEndpointsRequest], + Awaitable[~.ListIndexEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + Awaitable[gca_index_endpoint.IndexEndpoint]]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + @property + def mutate_deployed_index(self) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Returns: + Callable[[~.MutateDeployedIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_deployed_index' not in self._stubs: + self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex', + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['mutate_deployed_index'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py new file mode 100644 index 0000000000..f62272a024 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import IndexServiceClient +from .async_client import IndexServiceAsyncClient + +__all__ = ( + 'IndexServiceClient', + 'IndexServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py new file mode 100644 index 0000000000..87e1db42b1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -0,0 +1,1432 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport +from .client import IndexServiceClient + + +class IndexServiceAsyncClient: + """A service for creating and managing Vertex AI's Index + resources. 
+ """ + + _client: IndexServiceClient + + DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexServiceClient.index_path) + parse_index_path = staticmethod(IndexServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceAsyncClient: The constructed client. 
+ """ + return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceAsyncClient: The constructed client. + """ + return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return IndexServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> IndexServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, IndexServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = IndexServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_index(self, + request: Union[index_service.CreateIndexRequest, dict] = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Index in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.CreateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index(self, + request: Union[index_service.GetIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + name (:class:`str`): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.GetIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_indexes(self, + request: Union[index_service.ListIndexesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesAsyncPager: + r"""Lists Indexes in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_indexes(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.ListIndexesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_indexes, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index(self, + request: Union[index_service.UpdateIndexRequest, dict] = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an Index. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.UpdateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def delete_index(self, + request: Union[index_service.DeleteIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + name (:class:`str`): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.DeleteIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A
+            ``binding`` binds one or more ``members`` to a single
+            ``role``. Members can be user accounts, service
+            accounts, Google groups, and domains (such as G Suite).
+            A ``role`` is a named list of permissions (defined by
+            IAM or configured by users). A ``binding`` can
+            optionally specify a ``condition``, which is a logic
+            expression that further constrains the role binding
+            based on attributes about the request and/or target
+            resource.
+            **JSON Example**
+            ::
+                {
+                  "bindings": [
+                    {
+                      "role": "roles/resourcemanager.organizationAdmin",
+                      "members": [
+                        "user:mike@example.com",
+                        "group:admins@example.com",
+                        "domain:google.com",
+                        "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                      ]
+                    },
+                    {
+                      "role": "roles/resourcemanager.organizationViewer",
+                      "members": ["user:eve@example.com"],
+                      "condition": {
+                        "title": "expirable access",
+                        "description": "Does not grant access after Sep 2020",
+                        "expression": "request.time <
+                        timestamp('2020-10-01T00:00:00.000Z')",
+                      }
+                    }
+                  ]
+                }
+            **YAML Example**
+            ::
+                bindings:
+                - members:
+                  - user:mike@example.com
+                  - group:admins@example.com
+                  - domain:google.com
+                  - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                  role: roles/resourcemanager.organizationAdmin
+                - members:
+                  - user:eve@example.com
+                  role: roles/resourcemanager.organizationViewer
+                  condition:
+                    title: expirable access
+                    description: Does not grant access after Sep 2020
+                    expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+            For a description of IAM and its features, see the `IAM
+            developer's
+            guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+            **JSON Example**
+            ::
+                {
+                  "bindings": [
+                    {
+                      "role": "roles/resourcemanager.organizationAdmin",
+                      "members": [
+                        "user:mike@example.com",
+                        "group:admins@example.com",
+                        "domain:google.com",
+                        "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                      ]
+                    },
+                    {
+                      "role": "roles/resourcemanager.organizationViewer",
+                      "members": ["user:eve@example.com"],
+                      "condition": {
+                        "title": "expirable access",
+                        "description": "Does not grant access after Sep 2020",
+                        "expression": "request.time <
+                        timestamp('2020-10-01T00:00:00.000Z')",
+                      }
+                    }
+                  ]
+                }
+            **YAML Example**
+            ::
+                bindings:
+                - members:
+                  - user:mike@example.com
+                  - group:admins@example.com
+                  - domain:google.com
+                  - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                  role: roles/resourcemanager.organizationAdmin
+                - members:
+                  - user:eve@example.com
+                  role: roles/resourcemanager.organizationViewer
+                  condition:
+                    title: expirable access
+                    description: Does not grant access after Sep 2020
+                    expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+            For a description of IAM and its features, see the `IAM
+            developer's
+            guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py new file mode 100644 index 0000000000..3d9cdc73b6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -0,0 +1,1635 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import 
IndexServiceGrpcTransport +from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +class IndexServiceClientMeta(type): + """Metaclass for the IndexService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] + _transport_registry["grpc"] = IndexServiceGrpcTransport + _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[IndexServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class IndexServiceClient(metaclass=IndexServiceClientMeta): + """A service for creating and managing Vertex AI's Index + resources. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def index_path(project: str,location: str,index: str,) -> str: + """Returns a fully-qualified index string.""" + return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + + @staticmethod + def parse_index_path(path: str) -> Dict[str,str]: + """Parses a index path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: + """Returns a fully-qualified index_endpoint string.""" + return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + + @staticmethod + def parse_index_endpoint_path(path: str) -> Dict[str,str]: + """Parses a index_endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, IndexServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, IndexServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexServiceTransport): + # transport is a IndexServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_index(self, + request: Union[index_service.CreateIndexRequest, dict] = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + parent (str): + Required. The resource name of the Location to create + the Index in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.CreateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.CreateIndexRequest): + request = index_service.CreateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index(self, + request: Union[index_service.GetIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.GetIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.GetIndexRequest): + request = index_service.GetIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_indexes(self, + request: Union[index_service.ListIndexesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesPager: + r"""Lists Indexes in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_indexes(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + parent (str): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.ListIndexesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.ListIndexesRequest): + request = index_service.ListIndexesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_indexes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_index(self, + request: Union[index_service.UpdateIndexRequest, dict] = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates an Index. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.UpdateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.UpdateIndexRequest): + request = index_service.UpdateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_index(self, + request: Union[index_service.DeleteIndexRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Index. 
An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.DeleteIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.DeleteIndexRequest): + request = index_service.DeleteIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py new file mode 100644 index 0000000000..e8e247460a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service + + +class ListIndexesPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``indexes`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., index_service.ListIndexesResponse], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[index.Index]: + for page in self.pages: + yield from page.indexes + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListIndexesAsyncPager: + """A pager for iterating through ``list_indexes`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[index_service.ListIndexesResponse]], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[index.Index]: + async def async_generator(): + async for page in self.pages: + for response in page.indexes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py new file mode 100644 index 0000000000..651b5193ef --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexServiceTransport +from .grpc import IndexServiceGrpcTransport +from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] +_transport_registry['grpc'] = IndexServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexServiceTransport', + 'IndexServiceGrpcTransport', + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py new file mode 100644 index 0000000000..dc74143a69 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class IndexServiceTransport(abc.ABC): + """Abstract transport class for IndexService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_index: gapic_v1.method.wrap_method( + self.create_index, + default_timeout=5.0, + client_info=client_info, + ), + self.get_index: gapic_v1.method.wrap_method( + self.get_index, + default_timeout=5.0, + client_info=client_info, + ), + self.list_indexes: gapic_v1.method.wrap_method( + self.list_indexes, + default_timeout=5.0, + client_info=client_info, + ), + self.update_index: gapic_v1.method.wrap_method( + self.update_index, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_index: gapic_v1.method.wrap_method( + self.delete_index, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + Union[ + index.Index, + Awaitable[index.Index] + ]]: + raise NotImplementedError() + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + Union[ + index_service.ListIndexesResponse, + Awaitable[index_service.ListIndexesResponse] + ]]: + raise NotImplementedError() + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + 
[operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'IndexServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py new file mode 100644 index 0000000000..a48d3d1ac0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -0,0 +1,598 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO + + +class IndexServiceGrpcTransport(IndexServiceTransport): + """gRPC backend transport for IndexService. + + A service for creating and managing Vertex AI's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. 
+ + Returns: + Callable[[~.CreateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index' not in self._stubs: + self._stubs['create_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index'] + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + index.Index]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. + + Returns: + Callable[[~.GetIndexRequest], + ~.Index]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index' not in self._stubs: + self._stubs['get_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs['get_index'] + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + index_service.ListIndexesResponse]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + ~.ListIndexesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_indexes' not in self._stubs: + self._stubs['list_indexes'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs['list_indexes'] + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index' not in self._stubs: + self._stubs['update_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_index'] + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index' not in self._stubs: + self._stubs['delete_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'IndexServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..9e0306dadd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py @@ -0,0 +1,597 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexServiceGrpcTransport + + +class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): + """gRPC AsyncIO backend transport for IndexService. + + A service for creating and managing Vertex AI's Index + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. + + Returns: + Callable[[~.CreateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index' not in self._stubs: + self._stubs['create_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', + request_serializer=index_service.CreateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index'] + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + Awaitable[index.Index]]: + r"""Return a callable for the get index method over gRPC. + + Gets an Index. + + Returns: + Callable[[~.GetIndexRequest], + Awaitable[~.Index]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_index' not in self._stubs: + self._stubs['get_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', + request_serializer=index_service.GetIndexRequest.serialize, + response_deserializer=index.Index.deserialize, + ) + return self._stubs['get_index'] + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + Awaitable[index_service.ListIndexesResponse]]: + r"""Return a callable for the list indexes method over gRPC. + + Lists Indexes in a Location. + + Returns: + Callable[[~.ListIndexesRequest], + Awaitable[~.ListIndexesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_indexes' not in self._stubs: + self._stubs['list_indexes'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', + request_serializer=index_service.ListIndexesRequest.serialize, + response_deserializer=index_service.ListIndexesResponse.deserialize, + ) + return self._stubs['list_indexes'] + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update index method over gRPC. + + Updates an Index. + + Returns: + Callable[[~.UpdateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_index' not in self._stubs: + self._stubs['update_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', + request_serializer=index_service.UpdateIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_index'] + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete index method over gRPC. + + Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Returns: + Callable[[~.DeleteIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index' not in self._stubs: + self._stubs['delete_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py new file mode 100644 index 0000000000..3a5ccd23d8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import JobServiceClient +from .async_client import JobServiceAsyncClient + +__all__ = ( + 'JobServiceClient', + 'JobServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py new file mode 100644 index 0000000000..5c7d4b8fe2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -0,0 +1,3914 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources 
+from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport +from .client import JobServiceClient + + +class JobServiceAsyncClient: + """A service for creating and managing Vertex AI's jobs.""" + + _client: JobServiceClient + + DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT + + batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) + parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) + custom_job_path = staticmethod(JobServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) + data_labeling_job_path = 
staticmethod(JobServiceClient.data_labeling_job_path) + parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) + dataset_path = staticmethod(JobServiceClient.dataset_path) + parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) + endpoint_path = staticmethod(JobServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) + hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) + parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) + model_path = staticmethod(JobServiceClient.model_path) + parse_model_path = staticmethod(JobServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) + parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) + tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) + trial_path = staticmethod(JobServiceClient.trial_path) + parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(JobServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) + common_project_path = staticmethod(JobServiceClient.common_project_path) 
+ parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) + common_location_path = staticmethod(JobServiceClient.common_location_path) + parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return JobServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> JobServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            JobServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, JobServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the job service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, ~.JobServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = JobServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_custom_job(self, + request: Union[job_service.CreateCustomJobRequest, dict] = None, + *, + parent: str = None, + custom_job: gca_custom_job.CustomJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_custom_job.CustomJob: + r"""Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]): + The request object. Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + custom_job (:class:`google.cloud.aiplatform_v1beta1.types.CustomJob`): + Required. The CustomJob to create. + This corresponds to the ``custom_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. 
A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_custom_job(self, + request: Union[job_service.GetCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_custom_jobs(self, + request: Union[job_service.ListCustomJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsAsyncPager: + r"""Lists CustomJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]): + The request object. 
Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListCustomJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_custom_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCustomJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_custom_job(self, + request: Union[job_service.DeleteCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a CustomJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_custom_job(self, + request: Union[job_service.CancelCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_custom_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob to cancel. 
Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_data_labeling_job(self, + request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ data_labeling_job (:class:`google.cloud.aiplatform_v1beta1.types.DataLabelingJob`): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_data_labeling_job(self, + request: Union[job_service.GetDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_data_labeling_jobs(self, + request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsAsyncPager: + r"""Lists DataLabelingJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListDataLabelingJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_labeling_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataLabelingJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_data_labeling_job(self, + request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a DataLabelingJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_data_labeling_job(self, + request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_data_labeling_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_hyperparameter_tuning_job(self, + request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = 
aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = await client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob`): + Required. The HyperparameterTuningJob + to create. + + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_hyperparameter_tuning_job(self, + request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_hyperparameter_tuning_jobs(self, + request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsAsyncPager: + r"""Lists HyperparameterTuningJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListHyperparameterTuningJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHyperparameterTuningJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_hyperparameter_tuning_job(self, + request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a HyperparameterTuningJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_hyperparameter_tuning_job(self, + request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_hyperparameter_tuning_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_batch_prediction_job(self, + request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, + *, + parent: str = None, + batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_batch_prediction_job.BatchPredictionJob: + r"""Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + batch_prediction_job (:class:`google.cloud.aiplatform_v1beta1.types.BatchPredictionJob`): + Required. The BatchPredictionJob to + create. + + This corresponds to the ``batch_prediction_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, batch_prediction_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if batch_prediction_job is not None: + request.batch_prediction_job = batch_prediction_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_batch_prediction_job(self, + request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> batch_prediction_job.BatchPredictionJob: + r"""Gets a BatchPredictionJob + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. 
+ If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_batch_prediction_jobs(self, + request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsAsyncPager: + r"""Lists BatchPredictionJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListBatchPredictionJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBatchPredictionJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_batch_prediction_job(self, + request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_batch_prediction_job(self, + request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. The + server makes the best effort to cancel the job, but success is + not guaranteed. 
Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_batch_prediction_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_model_deployment_monitoring_job(self, + request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = await client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + parent (:class:`str`): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model_deployment_monitoring_job, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def search_model_deployment_monitoring_stats_anomalies(self, + request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, + *, + model_deployment_monitoring_job: str = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + r"""Searches Model Monitoring Statistics generated within + a given time window. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): + The request object. Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + model_deployment_monitoring_job (:class:`str`): + Required. ModelDeploymentMonitoring Job resource name. + Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + Required. 
The DeployedModel ID of the + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_model_deployment_monitoring_stats_anomalies, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_deployment_monitoring_job(self, + request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]): + The request object. 
Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_deployment_monitoring_jobs(self, + request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (:class:`str`): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_model_deployment_monitoring_job(self, + request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): + Required. The model monitoring + configuration which replaces the + resource on the server. 
+ + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask is used to specify the fields + to be overwritten in the ModelDeploymentMonitoringJob + resource by the update. The fields specified in the + update_mask are relative to the resource, not the full + request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override + all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . + or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model_deployment_monitoring_job(self, + request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def pause_model_deployment_monitoring_job(self, + request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.pause_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.PauseModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.pause_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def resume_model_deployment_monitoring_job(self, + request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.resume_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.resume_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py new file mode 100644 index 0000000000..e78ed57d18 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -0,0 +1,4198 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import 
io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import JobServiceGrpcTransport +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport + + +class JobServiceClientMeta(type): + """Metaclass for the JobService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] + _transport_registry["grpc"] = JobServiceGrpcTransport + _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[JobServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class JobServiceClient(metaclass=JobServiceClientMeta): + """A service for creating and managing Vertex AI's jobs.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobServiceTransport: + """Returns the transport used by the client instance. + + Returns: + JobServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: + """Returns a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + + @staticmethod + def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: + """Parses a batch_prediction_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: + """Returns a fully-qualified data_labeling_job string.""" + return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + + @staticmethod + def parse_data_labeling_job_path(path: str) -> Dict[str,str]: + """Parses a data_labeling_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return 
"projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: + """Returns a fully-qualified hyperparameter_tuning_job string.""" + return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + + @staticmethod + def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: + """Parses a hyperparameter_tuning_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: + """Returns a fully-qualified model_deployment_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + + @staticmethod + def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: + """Parses a model_deployment_monitoring_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: + """Returns a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str,str]: + """Parses a tensorboard path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def trial_path(project: str,location: str,study: str,trial: str,) -> str: + """Returns a fully-qualified trial 
string.""" + return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + + @staticmethod + def parse_trial_path(path: str) -> Dict[str,str]: + """Parses a trial path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its 
component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use.
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, JobServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, JobServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobServiceTransport): + # transport is a JobServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_custom_job(self, + request: Union[job_service.CreateCustomJobRequest, dict] = None, + *, + parent: str = None, + custom_job: gca_custom_job.CustomJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_custom_job.CustomJob: + r"""Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]): + The request object. Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + parent (str): + Required. 
The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): + Required. The CustomJob to create. + This corresponds to the ``custom_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateCustomJobRequest): + request = job_service.CreateCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_custom_job(self, + request: Union[job_service.GetCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetCustomJobRequest): + request = job_service.GetCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_custom_jobs(self, + request: Union[job_service.ListCustomJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsPager: + r"""Lists CustomJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListCustomJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListCustomJobsRequest): + request = job_service.ListCustomJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListCustomJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_custom_job(self, + request: Union[job_service.DeleteCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a CustomJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + name (str): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteCustomJobRequest): + request = job_service.DeleteCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_custom_job(self, + request: Union[job_service.CancelCustomJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. 
Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_custom_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelCustomJobRequest): + request = job_service.CancelCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_data_labeling_job(self, + request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateDataLabelingJobRequest): + request = job_service.CreateDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_data_labeling_job(self, + request: Union[job_service.GetDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetDataLabelingJobRequest): + request = job_service.GetDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_data_labeling_jobs(self, + request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsPager: + r"""Lists DataLabelingJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListDataLabelingJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListDataLabelingJobsRequest): + request = job_service.ListDataLabelingJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataLabelingJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_data_labeling_job(self, + request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a DataLabelingJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteDataLabelingJobRequest): + request = job_service.DeleteDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_data_labeling_job(self, + request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_data_labeling_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelDataLabelingJobRequest): + request = job_service.CancelDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_hyperparameter_tuning_job(self, + request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + parent (str): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob + to create. 
+ + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): + request = job_service.CreateHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_hyperparameter_tuning_job(self, + request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): + request = job_service.GetHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_hyperparameter_tuning_jobs(self, + request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsPager: + r"""Lists HyperparameterTuningJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListHyperparameterTuningJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): + request = job_service.ListHyperparameterTuningJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListHyperparameterTuningJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_hyperparameter_tuning_job(self, + request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a HyperparameterTuningJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): + request = job_service.DeleteHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_hyperparameter_tuning_job(self, + request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_hyperparameter_tuning_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest): + request = job_service.CancelHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_batch_prediction_job(self, + request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, + *, + parent: str = None, + batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_batch_prediction_job.BatchPredictionJob: + r"""Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + parent (str): + Required. The resource name of the Location to create + the BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): + Required. The BatchPredictionJob to + create. + + This corresponds to the ``batch_prediction_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, batch_prediction_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateBatchPredictionJobRequest): + request = job_service.CreateBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if batch_prediction_job is not None: + request.batch_prediction_job = batch_prediction_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_batch_prediction_job(self, + request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> batch_prediction_job.BatchPredictionJob: + r"""Gets a BatchPredictionJob + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: + A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions + on multiple [input + instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. 
+ If predictions for significant portion of the + instances fail, the job may finish without attempting + predictions for all remaining instances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetBatchPredictionJobRequest): + request = job_service.GetBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_batch_prediction_jobs(self, + request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsPager: + r"""Lists BatchPredictionJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListBatchPredictionJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListBatchPredictionJobsRequest): + request = job_service.ListBatchPredictionJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBatchPredictionJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_batch_prediction_job(self, + request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): + request = job_service.DeleteBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_batch_prediction_job(self, + request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. 
The + server makes the best effort to cancel the job, but success is + not guaranteed. Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_batch_prediction_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]): + The request object. Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelBatchPredictionJobRequest): + request = job_service.CancelBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_model_deployment_monitoring_job(self, + request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def search_model_deployment_monitoring_stats_anomalies(self, + request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, + *, + model_deployment_monitoring_job: str = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + r"""Searches Model Monitoring Statistics generated within + a given time window. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): + The request object. Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + Required. 
The DeployedModel ID of the + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: + Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_deployment_monitoring_job(self, + request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_deployment_monitoring_jobs(self, + request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListModelDeploymentMonitoringJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_model_deployment_monitoring_job(self, + request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask is used to specify the fields + to be overwritten in the ModelDeploymentMonitoringJob + resource by the update. The fields specified in the + update_mask are relative to the resource, not the full + request. A field will be overwritten if it is in the + mask. If the user does not provide a mask then only the + non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override + all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. + + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . + or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.UpdateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_deployment_monitoring_job(self, + request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelDeploymentMonitoringJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def pause_model_deployment_monitoring_job(self, + request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.pause_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + name (str): + Required. 
The resource name of the + ModelDeploymentMonitoringJob to pause. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.PauseModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest): + request = job_service.PauseModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def resume_model_deployment_monitoring_job(self, + request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.resume_model_deployment_monitoring_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): + The request object. Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ResumeModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest): + request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.locations_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py new file mode 100644 index 0000000000..82a7e9e753 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -0,0 +1,750 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job + + +class ListCustomJobsPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListCustomJobsResponse], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[custom_job.CustomJob]: + for page in self.pages: + yield from page.custom_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListCustomJobsAsyncPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]: + async def async_generator(): + async for page in self.pages: + for response in page.custom_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_labeling_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListDataLabelingJobsResponse], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]: + for page in self.pages: + yield from page.data_labeling_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsAsyncPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_labeling_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: + async def async_generator(): + async for page in self.pages: + for response in page.data_labeling_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: + for page in self.pages: + yield from page.hyperparameter_tuning_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsAsyncPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: + async def async_generator(): + async for page in self.pages: + for response in page.hyperparameter_tuning_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListBatchPredictionJobsResponse], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: + for page in self.pages: + yield from page.batch_prediction_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsAsyncPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``batch_prediction_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: + async def async_generator(): + async for page in self.pages: + for response in page.batch_prediction_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + for page in self.pages: + yield from page.monitoring_stats + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + async def async_generator(): + async for page in self.pages: + for response in page.monitoring_stats: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + for page in self.pages: + yield from page.model_deployment_monitoring_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsAsyncPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + async def async_generator(): + async for page in self.pages: + for response in page.model_deployment_monitoring_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py new file mode 100644 index 0000000000..05345304a2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from collections import OrderedDict
from typing import Dict, Type

from .base import JobServiceTransport
from .grpc import JobServiceGrpcTransport
from .grpc_asyncio import JobServiceGrpcAsyncIOTransport


# Compile a registry of transports.
# Maps the user-facing transport name ("grpc", "grpc_asyncio") to the
# concrete transport class implementing it; the client factory looks
# transports up here by name.
_transport_registry = OrderedDict()  # type: Dict[str, Type[JobServiceTransport]]
_transport_registry['grpc'] = JobServiceGrpcTransport
_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport

__all__ = (
    'JobServiceTransport',
    'JobServiceGrpcTransport',
    'JobServiceGrpcAsyncIOTransport',
)

# --- new file: owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py ---
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job
from google.cloud.aiplatform_v1beta1.types import custom_job
from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1beta1.types import data_labeling_job
from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job
from google.cloud.aiplatform_v1beta1.types import job_service
from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
# NOTE(review): the generator emitted this import twice (once bare, once
# with "# type: ignore"); the duplicate has been removed.
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

# Derive the user-agent client info from the installed distribution's
# version; fall back to a default ClientInfo when the package is not
# installed (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class JobServiceTransport(abc.ABC):
    """Abstract transport class for JobService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the actual wire
    protocol; this base class handles credential resolution and scope
    bookkeeping shared by all transports.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
+ self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_custom_job: gapic_v1.method.wrap_method( + self.create_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_custom_job: gapic_v1.method.wrap_method( + self.get_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_custom_jobs: gapic_v1.method.wrap_method( + self.list_custom_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_custom_job: gapic_v1.method.wrap_method( + self.delete_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_custom_job: gapic_v1.method.wrap_method( + self.cancel_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_data_labeling_job: gapic_v1.method.wrap_method( + self.create_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_data_labeling_job: gapic_v1.method.wrap_method( + self.get_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_data_labeling_jobs: gapic_v1.method.wrap_method( + self.list_data_labeling_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_data_labeling_job: gapic_v1.method.wrap_method( + self.delete_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_data_labeling_job: gapic_v1.method.wrap_method( + self.cancel_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( + self.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_hyperparameter_tuning_job: 
gapic_v1.method.wrap_method( + self.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.cancel_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_batch_prediction_job: gapic_v1.method.wrap_method( + self.create_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_batch_prediction_job: gapic_v1.method.wrap_method( + self.get_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( + self.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_batch_prediction_job: gapic_v1.method.wrap_method( + self.delete_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( + self.cancel_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.create_model_deployment_monitoring_job, + default_timeout=60.0, + client_info=client_info, + ), + self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( + self.search_model_deployment_monitoring_stats_anomalies, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.get_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( + self.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + 
self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.delete_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.pause_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.pause_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.resume_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + Union[ + gca_custom_job.CustomJob, + Awaitable[gca_custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Union[ + custom_job.CustomJob, + Awaitable[custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Union[ + job_service.ListCustomJobsResponse, + Awaitable[job_service.ListCustomJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def 
create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Union[ + gca_data_labeling_job.DataLabelingJob, + Awaitable[gca_data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Union[ + data_labeling_job.DataLabelingJob, + Awaitable[data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Union[ + job_service.ListDataLabelingJobsResponse, + Awaitable[job_service.ListDataLabelingJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Union[ + gca_hyperparameter_tuning_job.HyperparameterTuningJob, + Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Union[ + hyperparameter_tuning_job.HyperparameterTuningJob, + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Union[ + job_service.ListHyperparameterTuningJobsResponse, + Awaitable[job_service.ListHyperparameterTuningJobsResponse] + ]]: + raise NotImplementedError() + + 
@property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + Union[ + gca_batch_prediction_job.BatchPredictionJob, + Awaitable[gca_batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Union[ + batch_prediction_job.BatchPredictionJob, + Awaitable[batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Union[ + job_service.ListBatchPredictionJobsResponse, + Awaitable[job_service.ListBatchPredictionJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + Union[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob] + ]]: + raise NotImplementedError() + + 
@property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Union[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse] + ]]: + raise NotImplementedError() + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Union[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob] + ]]: + raise NotImplementedError() + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Union[ + job_service.ListModelDeploymentMonitoringJobsResponse, + Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] + ]]: + raise NotImplementedError() + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + 
[operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> 
str: + raise NotImplementedError() + + +__all__ = ( + 'JobServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py new file mode 100644 index 0000000000..40e0fec321 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -0,0 +1,1262 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO + + +class JobServiceGrpcTransport(JobServiceTransport): + """gRPC backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + gca_custom_job.CustomJob]: + r"""Return a callable for the create custom job method over gRPC. + + Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateCustomJobRequest], + ~.CustomJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_custom_job' not in self._stubs: + self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', + request_serializer=job_service.CreateCustomJobRequest.serialize, + response_deserializer=gca_custom_job.CustomJob.deserialize, + ) + return self._stubs['create_custom_job'] + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + custom_job.CustomJob]: + r"""Return a callable for the get custom job method over gRPC. + + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + ~.CustomJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + job_service.ListCustomJobsResponse]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + ~.ListCustomJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + gca_data_labeling_job.DataLabelingJob]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + data_labeling_job.DataLabelingJob]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + job_service.ListDataLabelingJobsResponse]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + ~.ListDataLabelingJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + gca_hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. 
+ + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + job_service.ListHyperparameterTuningJobsResponse]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + ~.ListHyperparameterTuningJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelHyperparameterTuningJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_hyperparameter_tuning_job' not in self._stubs: + self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob', + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_hyperparameter_tuning_job'] + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + gca_batch_prediction_job.BatchPredictionJob]: + r"""Return a callable for the create batch prediction job method over gRPC. + + Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. + + Returns: + Callable[[~.CreateBatchPredictionJobRequest], + ~.BatchPredictionJob]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + batch_prediction_job.BatchPredictionJob]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + ~.BatchPredictionJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + job_service.ListBatchPredictionJobsResponse]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. 
+ + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + ~.ListBatchPredictionJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_batch_prediction_job' not in self._stubs: + self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob', + request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_batch_prediction_job'] + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel batch prediction job method over gRPC. + + Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. The + server makes the best effort to cancel the job, but success is + not guaranteed. Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + Returns: + Callable[[~.CancelBatchPredictionJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. 
+ + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + job_service.ListModelDeploymentMonitoringJobsResponse]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + ~.ListModelDeploymentMonitoringJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. + + Updates a ModelDeploymentMonitoringJob. 
+ + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'pause_model_deployment_monitoring_job' not in self._stubs: + self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['pause_model_deployment_monitoring_job'] + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. 
+ + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'resume_model_deployment_monitoring_job' not in self._stubs: + self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['resume_model_deployment_monitoring_job'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def wait_operation(
+ self,
+ ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+ r"""Return a callable for the wait_operation method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "wait_operation" not in self._stubs:  # fixed cache key: was "delete_operation", which defeated caching of this stub
+ self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/WaitOperation",
+ request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["wait_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'JobServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..821c48b81f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py @@ -0,0 +1,1261 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import JobServiceGrpcTransport + + +class JobServiceGrpcAsyncIOTransport(JobServiceTransport): + """gRPC AsyncIO backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + Awaitable[gca_custom_job.CustomJob]]: + r"""Return a callable for the create custom job method over gRPC. + + Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateCustomJobRequest], + Awaitable[~.CustomJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_custom_job' not in self._stubs: + self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', + request_serializer=job_service.CreateCustomJobRequest.serialize, + response_deserializer=gca_custom_job.CustomJob.deserialize, + ) + return self._stubs['create_custom_job'] + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Awaitable[custom_job.CustomJob]]: + r"""Return a callable for the get custom job method over gRPC. + + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + Awaitable[~.CustomJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Awaitable[job_service.ListCustomJobsResponse]]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + Awaitable[~.ListCustomJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Awaitable[gca_data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Awaitable[data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Awaitable[job_service.ListDataLabelingJobsResponse]]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + Awaitable[~.ListDataLabelingJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. 
+ + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + Awaitable[~.ListHyperparameterTuningJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. 
+ Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelHyperparameterTuningJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_hyperparameter_tuning_job' not in self._stubs: + self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob', + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_hyperparameter_tuning_job'] + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: + r"""Return a callable for the create batch prediction job method over gRPC. + + Creates a BatchPredictionJob. A BatchPredictionJob + once created will right away be attempted to start. 
+ + Returns: + Callable[[~.CreateBatchPredictionJobRequest], + Awaitable[~.BatchPredictionJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Awaitable[batch_prediction_job.BatchPredictionJob]]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + Awaitable[~.BatchPredictionJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Awaitable[job_service.ListBatchPredictionJobsResponse]]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. + + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + Awaitable[~.ListBatchPredictionJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_batch_prediction_job' not in self._stubs: + self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob', + request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_batch_prediction_job'] + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel batch prediction job method over gRPC. + + Cancels a BatchPredictionJob. + + Starts asynchronous cancellation on the BatchPredictionJob. The + server makes the best effort to cancel the job, but success is + not guaranteed. Clients can use + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On a successful + cancellation, the BatchPredictionJob is not deleted;instead its + [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + is set to ``CANCELLED``. Any files already outputted by the job + are not deleted. + + Returns: + Callable[[~.CancelBatchPredictionJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: + r"""Return a callable for the create model deployment + monitoring job method over gRPC. + + Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Returns: + Callable[[~.CreateModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. + + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. 
+ + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. Will mark + [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] + to 'PAUSED'. + + Returns: + Callable[[~.PauseModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'pause_model_deployment_monitoring_job' not in self._stubs: + self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['pause_model_deployment_monitoring_job'] + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the resume model deployment + monitoring job method over gRPC. + + Resumes a paused ModelDeploymentMonitoringJob. It + will start to run from next scheduled time. A deleted + ModelDeploymentMonitoringJob can't be resumed. + + Returns: + Callable[[~.ResumeModelDeploymentMonitoringJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'resume_model_deployment_monitoring_job' not in self._stubs: + self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['resume_model_deployment_monitoring_job'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'JobServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py new file mode 100644 index 0000000000..97cb7deb5d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import MetadataServiceClient +from .async_client import MetadataServiceAsyncClient + +__all__ = ( + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py new file mode 100644 index 0000000000..450e453af7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -0,0 +1,4287 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # 
type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport +from .client import MetadataServiceClient + + +class MetadataServiceAsyncClient: + """Service for reading and writing metadata entries.""" + + _client: MetadataServiceClient + + DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(MetadataServiceClient.artifact_path) + parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) + context_path = staticmethod(MetadataServiceClient.context_path) + parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) + execution_path = staticmethod(MetadataServiceClient.execution_path) + parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) + metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) + parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) + metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) + parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) + common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) + parse_common_folder_path = 
staticmethod(MetadataServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(MetadataServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MetadataServiceClient.common_project_path) + parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) + common_location_path = staticmethod(MetadataServiceClient.common_location_path) + parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return MetadataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MetadataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_metadata_store(self, + request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Initializes a MetadataStore, including allocation of + resources. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (:class:`str`): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (:class:`google.cloud.aiplatform_v1beta1.types.MetadataStore`): + Required. The MetadataStore to + create. 
+ + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (:class:`str`): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_metadata_store(self, + request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]): + The request object. 
Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_metadata_stores(self, + request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresAsyncPager: + r"""Lists MetadataStores for a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (:class:`str`): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_stores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataStoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_metadata_store(self, + request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (:class:`str`): + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_artifact(self, + request: Union[metadata_service.CreateArtifactRequest, dict] = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (:class:`str`): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_artifact(self, + request: Union[metadata_service.GetArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_artifacts(self, + request: Union[metadata_service.ListArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsAsyncPager: + r"""Lists Artifacts in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]): + The request object. 
Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (:class:`str`): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_artifacts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListArtifactsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_artifact(self, + request: Union[metadata_service.UpdateArtifactRequest, dict] = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_artifact(self, + request: Union[metadata_service.DeleteArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]): + The request object. Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. + name (:class:`str`): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_artifact, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def purge_artifacts(self, + request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Artifacts. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + parent (:class:`str`): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_artifacts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_context(self, + request: Union[metadata_service.CreateContextRequest, dict] = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (:class:`str`): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. 
Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_context(self, + request: Union[metadata_service.GetContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (:class:`str`): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_contexts(self, + request: Union[metadata_service.ListContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsAsyncPager: + r"""Lists Contexts on the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]): + The request object. 
Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (:class:`str`): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_contexts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListContextsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_context(self, + request: Union[metadata_service.UpdateContextRequest, dict] = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context.name", request.context.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_context(self, + request: Union[metadata_service.DeleteContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a stored Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + name (:class:`str`): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def purge_contexts(self, + request: Union[metadata_service.PurgeContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Contexts. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_purge_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + parent (:class:`str`): + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. 
+ return response + + async def add_context_artifacts_and_executions(self, + request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (:class:`str`): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (:class:`Sequence[str]`): + The resource names of the Artifacts to attribute to the + Context. 
+ + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (:class:`Sequence[str]`): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts: + request.artifacts.extend(artifacts) + if executions: + request.executions.extend(executions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def add_context_children(self, + request: Union[metadata_service.AddContextChildrenRequest, dict] = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_add_context_children(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (:class:`str`): + Required. The resource name of the parent Context. 
+ + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (:class:`Sequence[str]`): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts: + request.child_contexts.extend(child_contexts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_children, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_context_lineage_subgraph(self, + request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, + *, + context: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (:class:`str`): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. 
+ + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def create_execution(self, + request: Union[metadata_service.CreateExecutionRequest, dict] = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (:class:`str`): + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (:class:`str`): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. 
Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_execution(self, + request: Union[metadata_service.GetExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (:class:`str`): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_executions(self, + request: Union[metadata_service.ListExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsAsyncPager: + r"""Lists Executions in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]): + The request object. 
Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (:class:`str`): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExecutionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_execution(self, + request: Union[metadata_service.UpdateExecutionRequest, dict] = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_execution(self, + request: Union[metadata_service.DeleteExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. + name (:class:`str`): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_execution, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def purge_executions(self, + request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Purges Executions. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_purge_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + parent (:class:`str`): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.PurgeExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.purge_executions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. 
+ return response + + async def add_execution_events(self, + request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_add_execution_events(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.add_execution_events(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (:class:`str`): + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.Event]`): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddExecutionEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events: + request.events.extend(events) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_execution_events, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def query_execution_inputs_and_outputs(self, + request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, + *, + execution: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (:class:`str`): + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_execution_inputs_and_outputs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_metadata_schema(self, + request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates a MetadataSchema. + + .. 
code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_create_metadata_schema():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceAsyncClient()
+
+ # Initialize request argument(s)
+ metadata_schema = aiplatform_v1beta1.MetadataSchema()
+ metadata_schema.schema = "schema_value"
+
+ request = aiplatform_v1beta1.CreateMetadataSchemaRequest(
+ parent="parent_value",
+ metadata_schema=metadata_schema,
+ )
+
+ # Make the request
+ response = await client.create_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]):
+ The request object. Request message for
+ [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
+ parent (:class:`str`):
+ Required. The resource name of the MetadataStore where
+ the MetadataSchema should be created. Format:
+ ``projects/{project}/locations/{location}/metadataStores/{metadatastore}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ metadata_schema (:class:`google.cloud.aiplatform_v1beta1.types.MetadataSchema`):
+ Required. The MetadataSchema to
+ create.
+
+ This corresponds to the ``metadata_schema`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ metadata_schema_id (:class:`str`):
+ The {metadata_schema} portion of the resource name with
+ the format:
+ ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}``
+ If not provided, the MetadataSchema's ID will be a UUID
+ generated by the service. Must be 4-128 characters in
+ length. Valid characters are ``/[a-z][0-9]-/``. Must be
+ unique across all MetadataSchemas in the parent
+ Location.
(Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) + + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_metadata_schema(self, + request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_schema(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (:class:`str`): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_metadata_schemas(self, + request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasAsyncPager: + r"""Lists MetadataSchemas. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + parent (:class:`str`): + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListMetadataSchemasRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_schemas, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataSchemasAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_artifact_lineage_subgraph(self, + request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, + *, + artifact: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = await client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (:class:`str`): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact", request.artifact), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def set_iam_policy(
+ self,
+ request: iam_policy_pb2.SetIamPolicyRequest = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Sets the IAM access control policy on the specified resource.
+
+ Replaces any existing policy.
+
+ Args:
+ request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
+ The request object. Request message for `SetIamPolicy`
+ method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ Returns:
+ ~.policy_pb2.Policy:
+ Defines an Identity and Access Management (IAM) policy.
+ It is used to specify access control policies for Cloud
+ Platform resources.
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members`` to a single
+ ``role``. Members can be user accounts, service
+ accounts, Google groups, and domains (such as G Suite).
+ A ``role`` is a named list of permissions (defined by
+ IAM or configured by users). A ``binding`` can
+ optionally specify a ``condition``, which is a logic
+ expression that further constrains the role binding
+ based on attributes about the request and/or target
+ resource.
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response
+
+ async def get_iam_policy(
+ self,
+ request: iam_policy_pb2.GetIamPolicyRequest = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Gets the IAM access control policy for a resource.
+
+ Returns an empty policy if the resource exists and does not have a
+ policy set.
+
+ Args:
+ request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
+ The request object. Request message for `GetIamPolicy`
+ method.
+ retry (google.api_core.retry.Retry): Designation of what errors, if
+ any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ Returns:
+ ~.policy_pb2.Policy:
+ Defines an Identity and Access Management (IAM) policy.
+ It is used to specify access control policies for Cloud
+ Platform resources.
+ A ``Policy`` is a collection of ``bindings``. A
+ ``binding`` binds one or more ``members`` to a single
+ ``role``. Members can be user accounts, service
+ accounts, Google groups, and domains (such as G Suite).
+ A ``role`` is a named list of permissions (defined by
+ IAM or configured by users). A ``binding`` can
+ optionally specify a ``condition``, which is a logic
+ expression that further constrains the role binding
+ based on attributes about the request and/or target
+ resource.
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MetadataServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py new file mode 100644 index 0000000000..05f4b32714 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py @@ -0,0 +1,4517 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from 
google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetadataServiceGrpcTransport +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +class MetadataServiceClientMeta(type): + """Metaclass for the MetadataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] + _transport_registry["grpc"] = MetadataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MetadataServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class MetadataServiceClient(metaclass=MetadataServiceClientMeta): + """Service for reading and writing metadata entries.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. 
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str,str]: + """Parses a artifact path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str,str]: + """Parses a context path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: + 
"""Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str,str]: + """Parses a execution path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str: + """Returns a fully-qualified metadata_schema string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) + + @staticmethod + def parse_metadata_schema_path(path: str) -> Dict[str,str]: + """Parses a metadata_schema path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def metadata_store_path(project: str,location: str,metadata_store: str,) -> str: + """Returns a fully-qualified metadata_store string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) + + @staticmethod + def parse_metadata_store_path(path: str) -> Dict[str,str]: + """Parses a metadata_store path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} 
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MetadataServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MetadataServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetadataServiceTransport): + # transport is a MetadataServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_metadata_store(self, + request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Initializes a MetadataStore, including allocation of + resources. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (str): + The {metadatastore} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataStoreRequest): + request = metadata_service.CreateMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_metadata_store(self, + request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataStoreRequest): + request = metadata_service.GetMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_metadata_stores(self, + request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresPager: + r"""Lists MetadataStores for a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataStoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataStoresRequest): + request = metadata_service.ListMetadataStoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataStoresPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_metadata_store(self, + request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (str): + Required. The resource name of the MetadataStore to + delete. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): + request = metadata_service.DeleteMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def create_artifact(self, + request: Union[metadata_service.CreateArtifactRequest, dict] = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (str): + Required. The resource name of the MetadataStore where + the Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ artifact_id (str): + The {artifact} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateArtifactRequest): + request = metadata_service.CreateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_artifact(self, + request: Union[metadata_service.GetArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetArtifactRequest): + request = metadata_service.GetArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_artifacts(self, + request: Union[metadata_service.ListArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsPager: + r"""Lists Artifacts in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListArtifactsRequest): + request = metadata_service.ListArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListArtifactsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_artifact(self, + request: Union[metadata_service.UpdateArtifactRequest, dict] = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateArtifactRequest): + request = metadata_service.UpdateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_artifact(self, + request: Union[metadata_service.DeleteArtifactRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Artifact. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]): + The request object. Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. + name (str): + Required. The resource name of the Artifact to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteArtifactRequest): + request = metadata_service.DeleteArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_artifacts(self, + request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Artifacts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + parent (str): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeArtifactsResponse` + Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeArtifactsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeArtifactsRequest): + request = metadata_service.PurgeArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeArtifactsResponse, + metadata_type=metadata_service.PurgeArtifactsMetadata, + ) + + # Done; return the response. + return response + + def create_context(self, + request: Union[metadata_service.CreateContextRequest, dict] = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (str): + Required. The resource name of the MetadataStore where + the Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (str): + The {context} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateContextRequest): + request = metadata_service.CreateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_context(self, + request: Union[metadata_service.GetContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (str): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetContextRequest): + request = metadata_service.GetContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_contexts(self, + request: Union[metadata_service.ListContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsPager: + r"""Lists Contexts on the MetadataStore. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (str): + Required. The MetadataStore whose Contexts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListContextsRequest): + request = metadata_service.ListContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListContextsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_context(self, + request: Union[metadata_service.UpdateContextRequest, dict] = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateContextRequest): + request = metadata_service.UpdateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context.name", request.context.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_context(self, + request: Union[metadata_service.DeleteContextRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a stored Context. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]): + The request object. Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + name (str): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteContextRequest): + request = metadata_service.DeleteContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_contexts(self, + request: Union[metadata_service.PurgeContextsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Contexts. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_purge_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + parent (str): + Required. The metadata store to purge Contexts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeContextsResponse` + Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeContextsRequest): + request = metadata_service.PurgeContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeContextsResponse, + metadata_type=metadata_service.PurgeContextsMetadata, + ) + + # Done; return the response. + return response + + def add_context_artifacts_and_executions(self, + request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. 
+ If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (Sequence[str]): + The resource names of the Executions to associate with + the Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextArtifactsAndExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts is not None: + request.artifacts = artifacts + if executions is not None: + request.executions = executions + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_context_children(self, + request: Union[metadata_service.AddContextChildrenRequest, dict] = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_add_context_children(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ child_contexts (Sequence[str]): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextChildrenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextChildrenRequest): + request = metadata_service.AddContextChildrenRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts is not None: + request.child_contexts = child_contexts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_children] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_context_lineage_subgraph(self, + request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, + *, + context: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (str): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. 
+ + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryContextLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): + request = metadata_service.QueryContextLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_execution(self, + request: Union[metadata_service.CreateExecutionRequest, dict] = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (str): + Required. The resource name of the MetadataStore where + the Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateExecutionRequest): + request = metadata_service.CreateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_execution(self, + request: Union[metadata_service.GetExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (str): + Required. The resource name of the Execution to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetExecutionRequest): + request = metadata_service.GetExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_executions(self, + request: Union[metadata_service.ListExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsPager: + r"""Lists Executions in the MetadataStore. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (str): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListExecutionsRequest): + request = metadata_service.ListExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExecutionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_execution(self, + request: Union[metadata_service.UpdateExecutionRequest, dict] = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating + which fields should be updated. + Functionality of this field is not yet + supported. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateExecutionRequest): + request = metadata_service.UpdateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_execution(self, + request: Union[metadata_service.DeleteExecutionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Execution. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]): + The request object. Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteExecutionRequest): + request = metadata_service.DeleteExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def purge_executions(self, + request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Purges Executions. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_purge_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]): + The request object. Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.PurgeExecutionsResponse` + Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.PurgeExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.PurgeExecutionsRequest): + request = metadata_service.PurgeExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.purge_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + metadata_service.PurgeExecutionsResponse, + metadata_type=metadata_service.PurgeExecutionsMetadata, + ) + + # Done; return the response. + return response + + def add_execution_events(self, + request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. 
If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_add_execution_events(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (str): + Required. The resource name of the Execution that the + Events connect Artifacts with. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddExecutionEventsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddExecutionEventsRequest): + request = metadata_service.AddExecutionEventsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events is not None: + request.events = events + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_execution_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_execution_inputs_and_outputs(self, + request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, + *, + execution: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (str): + Required. The resource name of the Execution whose input + and output Artifacts should be retrieved as a + LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryExecutionInputsAndOutputsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest): + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_metadata_schema(self, + request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, + *, + parent: str = None, + metadata_schema: gca_metadata_schema.MetadataSchema = None, + metadata_schema_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_metadata_schema.MetadataSchema: + r"""Creates a MetadataSchema. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1beta1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1beta1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. + parent (str): + Required. The resource name of the MetadataStore where + the MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): + Required. The MetadataSchema to + create. + + This corresponds to the ``metadata_schema`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with + the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent + Location. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting MetadataSchema.) 
+ + This corresponds to the ``metadata_schema_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): + request = metadata_service.CreateMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_metadata_schema(self, + request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. 
+ """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataSchemaRequest): + request = metadata_service.GetMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_metadata_schemas(self, + request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasPager: + r"""Lists MetadataSchemas. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + parent (str): + Required. The MetadataStore whose MetadataSchemas should + be listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataSchemasRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataSchemasRequest): + request = metadata_service.ListMetadataSchemasRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataSchemasPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_artifact_lineage_subgraph(self, + request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, + *, + artifact: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]): + The request object. Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + artifact (str): + Required. The resource name of the Artifact whose + Lineage needs to be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([artifact]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryArtifactLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest): + request = metadata_service.QueryArtifactLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact", request.artifact), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. 
+ + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MetadataServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py new file mode 100644 index 0000000000..d7e4e70e66 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store + + +class ListMetadataStoresPager: + """A pager for iterating through ``list_metadata_stores`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListMetadataStoresResponse], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[metadata_store.MetadataStore]: + for page in self.pages: + yield from page.metadata_stores + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataStoresAsyncPager: + """A pager for iterating through ``list_metadata_stores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_stores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListArtifactsPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., metadata_service.ListArtifactsResponse], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[artifact.Artifact]: + for page in self.pages: + yield from page.artifacts + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListArtifactsAsyncPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[artifact.Artifact]: + async def async_generator(): + async for page in self.pages: + for response in page.artifacts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListContextsPager: + """A pager for iterating through ``list_contexts`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListContextsResponse], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[context.Context]: + for page in self.pages: + yield from page.contexts + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListContextsAsyncPager: + """A pager for iterating through ``list_contexts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[context.Context]: + async def async_generator(): + async for page in self.pages: + for response in page.contexts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListExecutionsPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., metadata_service.ListExecutionsResponse], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[execution.Execution]: + for page in self.pages: + yield from page.executions + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListExecutionsAsyncPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[execution.Execution]: + async def async_generator(): + async for page in self.pages: + for response in page.executions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasPager: + """A pager for iterating through ``list_metadata_schemas`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListMetadataSchemasResponse], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]: + for page in self.pages: + yield from page.metadata_schemas + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasAsyncPager: + """A pager for iterating through ``list_metadata_schemas`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_schemas: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py new file mode 100644 index 0000000000..63e43972af --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MetadataServiceTransport +from .grpc import MetadataServiceGrpcTransport +from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] +_transport_registry['grpc'] = MetadataServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MetadataServiceGrpcAsyncIOTransport + +__all__ = ( + 'MetadataServiceTransport', + 'MetadataServiceGrpcTransport', + 'MetadataServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py new file mode 100644 index 0000000000..db4d743103 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -0,0 +1,682 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class MetadataServiceTransport(abc.ABC): + """Abstract transport class for 
MetadataService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_metadata_store: gapic_v1.method.wrap_method( + self.create_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_store: gapic_v1.method.wrap_method( + self.get_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.list_metadata_stores: gapic_v1.method.wrap_method( + self.list_metadata_stores, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_metadata_store: gapic_v1.method.wrap_method( + self.delete_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.create_artifact: gapic_v1.method.wrap_method( + self.create_artifact, + default_timeout=5.0, + client_info=client_info, + ), + self.get_artifact: gapic_v1.method.wrap_method( + self.get_artifact, + default_timeout=5.0, + client_info=client_info, + ), + self.list_artifacts: gapic_v1.method.wrap_method( + self.list_artifacts, + default_timeout=5.0, + client_info=client_info, + ), + self.update_artifact: gapic_v1.method.wrap_method( + self.update_artifact, + default_timeout=5.0, + 
client_info=client_info, + ), + self.delete_artifact: gapic_v1.method.wrap_method( + self.delete_artifact, + default_timeout=None, + client_info=client_info, + ), + self.purge_artifacts: gapic_v1.method.wrap_method( + self.purge_artifacts, + default_timeout=None, + client_info=client_info, + ), + self.create_context: gapic_v1.method.wrap_method( + self.create_context, + default_timeout=5.0, + client_info=client_info, + ), + self.get_context: gapic_v1.method.wrap_method( + self.get_context, + default_timeout=5.0, + client_info=client_info, + ), + self.list_contexts: gapic_v1.method.wrap_method( + self.list_contexts, + default_timeout=5.0, + client_info=client_info, + ), + self.update_context: gapic_v1.method.wrap_method( + self.update_context, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_context: gapic_v1.method.wrap_method( + self.delete_context, + default_timeout=5.0, + client_info=client_info, + ), + self.purge_contexts: gapic_v1.method.wrap_method( + self.purge_contexts, + default_timeout=None, + client_info=client_info, + ), + self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( + self.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=client_info, + ), + self.add_context_children: gapic_v1.method.wrap_method( + self.add_context_children, + default_timeout=5.0, + client_info=client_info, + ), + self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=client_info, + ), + self.create_execution: gapic_v1.method.wrap_method( + self.create_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.get_execution: gapic_v1.method.wrap_method( + self.get_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.list_executions: gapic_v1.method.wrap_method( + self.list_executions, + default_timeout=5.0, + client_info=client_info, + ), + self.update_execution: gapic_v1.method.wrap_method( + 
self.update_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_execution: gapic_v1.method.wrap_method( + self.delete_execution, + default_timeout=None, + client_info=client_info, + ), + self.purge_executions: gapic_v1.method.wrap_method( + self.purge_executions, + default_timeout=None, + client_info=client_info, + ), + self.add_execution_events: gapic_v1.method.wrap_method( + self.add_execution_events, + default_timeout=5.0, + client_info=client_info, + ), + self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( + self.query_execution_inputs_and_outputs, + default_timeout=5.0, + client_info=client_info, + ), + self.create_metadata_schema: gapic_v1.method.wrap_method( + self.create_metadata_schema, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_schema: gapic_v1.method.wrap_method( + self.get_metadata_schema, + default_timeout=5.0, + client_info=client_info, + ), + self.list_metadata_schemas: gapic_v1.method.wrap_method( + self.list_metadata_schemas, + default_timeout=5.0, + client_info=client_info, + ), + self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Union[ + metadata_store.MetadataStore, + Awaitable[metadata_store.MetadataStore] + ]]: + raise NotImplementedError() + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Union[ + metadata_service.ListMetadataStoresResponse, + Awaitable[metadata_service.ListMetadataStoresResponse] + ]]: + raise NotImplementedError() + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Union[ + artifact.Artifact, + Awaitable[artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Union[ + metadata_service.ListArtifactsResponse, + Awaitable[metadata_service.ListArtifactsResponse] + ]]: + raise NotImplementedError() + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def delete_artifact(self) -> Callable[ + 
[metadata_service.DeleteArtifactRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise NotImplementedError() + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Union[ + context.Context, + Awaitable[context.Context] + ]]: + raise NotImplementedError() + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Union[ + metadata_service.ListContextsResponse, + Awaitable[metadata_service.ListContextsResponse] + ]]: + raise NotImplementedError() + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise NotImplementedError() + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_contexts(self) -> Callable[ + [metadata_service.PurgeContextsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Union[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def add_context_children(self) -> Callable[ + 
[metadata_service.AddContextChildrenRequest], + Union[ + metadata_service.AddContextChildrenResponse, + Awaitable[metadata_service.AddContextChildrenResponse] + ]]: + raise NotImplementedError() + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Union[ + execution.Execution, + Awaitable[execution.Execution] + ]]: + raise NotImplementedError() + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Union[ + metadata_service.ListExecutionsResponse, + Awaitable[metadata_service.ListExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def delete_execution(self) -> Callable[ + [metadata_service.DeleteExecutionRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def purge_executions(self) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Union[ + metadata_service.AddExecutionEventsResponse, + Awaitable[metadata_service.AddExecutionEventsResponse] + ]]: + raise NotImplementedError() + + @property 
+ def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Union[ + gca_metadata_schema.MetadataSchema, + Awaitable[gca_metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Union[ + metadata_schema.MetadataSchema, + Awaitable[metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Union[ + metadata_service.ListMetadataSchemasResponse, + Awaitable[metadata_service.ListMetadataSchemasResponse] + ]]: + raise NotImplementedError() + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def 
wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'MetadataServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py new file mode 100644 index 0000000000..e4be4b03c1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -0,0 +1,1301 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO + + +class 
MetadataServiceGrpcTransport(MetadataServiceTransport): + """gRPC backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + metadata_store.MetadataStore]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + ~.MetadataStore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + metadata_service.ListMetadataStoresResponse]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + ~.ListMetadataStoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + artifact.Artifact]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + metadata_service.ListArtifactsResponse]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + ~.ListArtifactsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def delete_artifact(self) -> Callable[ + [metadata_service.DeleteArtifactRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_artifact' not in self._stubs: + self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteArtifact', + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_artifact'] + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + operations_pb2.Operation]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. + + Returns: + Callable[[~.PurgeArtifactsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_artifacts' not in self._stubs: + self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeArtifacts', + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_artifacts'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + gca_context.Context]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + context.Context]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + metadata_service.ListContextsResponse]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + ~.ListContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + gca_context.Context]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_context' not in self._stubs:
+            self._stubs['delete_context'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext',
+                request_serializer=metadata_service.DeleteContextRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,  # long-running op: raw protobuf FromString, not a gapic .deserialize
+            )
+        return self._stubs['delete_context']
+
+    @property
+    def purge_contexts(self) -> Callable[
+            [metadata_service.PurgeContextsRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the purge contexts method over gRPC.
+
+        Purges Contexts.
+
+        Returns:
+            Callable[[~.PurgeContextsRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'purge_contexts' not in self._stubs:
+            self._stubs['purge_contexts'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeContexts',
+                request_serializer=metadata_service.PurgeContextsRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['purge_contexts']
+
+    @property
+    def add_context_artifacts_and_executions(self) -> Callable[
+            [metadata_service.AddContextArtifactsAndExecutionsRequest],
+            metadata_service.AddContextArtifactsAndExecutionsResponse]:
+        r"""Return a callable for the add context artifacts and
+        executions method over gRPC.
+
+        Adds a set of Artifacts and Executions to a Context.
+        If any of the Artifacts or Executions have already been
+        added to a Context, they are simply skipped.
+
+        Returns:
+            Callable[[~.AddContextArtifactsAndExecutionsRequest],
+                    ~.AddContextArtifactsAndExecutionsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'add_context_artifacts_and_executions' not in self._stubs:
+            self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions',
+                request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize,
+                response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize,
+            )
+        return self._stubs['add_context_artifacts_and_executions']
+
+    @property
+    def add_context_children(self) -> Callable[
+            [metadata_service.AddContextChildrenRequest],
+            metadata_service.AddContextChildrenResponse]:
+        r"""Return a callable for the add context children method over gRPC.
+
+        Adds a set of Contexts as children to a parent Context. If any
+        of the child Contexts have already been added to the parent
+        Context, they are simply skipped. If this call would create a
+        cycle or cause any Context to have more than 10 parents, the
+        request will fail with an INVALID_ARGUMENT error.
+
+        Returns:
+            Callable[[~.AddContextChildrenRequest],
+                    ~.AddContextChildrenResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'add_context_children' not in self._stubs:
+            self._stubs['add_context_children'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren',
+                request_serializer=metadata_service.AddContextChildrenRequest.serialize,
+                response_deserializer=metadata_service.AddContextChildrenResponse.deserialize,
+            )
+        return self._stubs['add_context_children']
+
+    @property
+    def query_context_lineage_subgraph(self) -> Callable[
+            [metadata_service.QueryContextLineageSubgraphRequest],
+            lineage_subgraph.LineageSubgraph]:
+        r"""Return a callable for the query context lineage subgraph method over gRPC.
+
+        Retrieves Artifacts and Executions within the
+        specified Context, connected by Event edges and returned
+        as a LineageSubgraph.
+
+        Returns:
+            Callable[[~.QueryContextLineageSubgraphRequest],
+                    ~.LineageSubgraph]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'query_context_lineage_subgraph' not in self._stubs:
+            self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph',
+                request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize,
+                response_deserializer=lineage_subgraph.LineageSubgraph.deserialize,
+            )
+        return self._stubs['query_context_lineage_subgraph']
+
+    @property
+    def create_execution(self) -> Callable[
+            [metadata_service.CreateExecutionRequest],
+            gca_execution.Execution]:
+        r"""Return a callable for the create execution method over gRPC.
+
+        Creates an Execution associated with a MetadataStore.
+
+        Returns:
+            Callable[[~.CreateExecutionRequest],
+                    ~.Execution]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_execution' not in self._stubs:  # cached lazily, like every stub on this transport
+            self._stubs['create_execution'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution',
+                request_serializer=metadata_service.CreateExecutionRequest.serialize,
+                response_deserializer=gca_execution.Execution.deserialize,
+            )
+        return self._stubs['create_execution']
+
+    @property
+    def get_execution(self) -> Callable[
+            [metadata_service.GetExecutionRequest],
+            execution.Execution]:
+        r"""Return a callable for the get execution method over gRPC.
+
+        Retrieves a specific Execution.
+
+        Returns:
+            Callable[[~.GetExecutionRequest],
+                    ~.Execution]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_execution' not in self._stubs:
+            self._stubs['get_execution'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution',
+                request_serializer=metadata_service.GetExecutionRequest.serialize,
+                response_deserializer=execution.Execution.deserialize,
+            )
+        return self._stubs['get_execution']
+
+    @property
+    def list_executions(self) -> Callable[
+            [metadata_service.ListExecutionsRequest],
+            metadata_service.ListExecutionsResponse]:
+        r"""Return a callable for the list executions method over gRPC.
+
+        Lists Executions in the MetadataStore.
+
+        Returns:
+            Callable[[~.ListExecutionsRequest],
+                    ~.ListExecutionsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_executions' not in self._stubs:
+            self._stubs['list_executions'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions',
+                request_serializer=metadata_service.ListExecutionsRequest.serialize,
+                response_deserializer=metadata_service.ListExecutionsResponse.deserialize,
+            )
+        return self._stubs['list_executions']
+
+    @property
+    def update_execution(self) -> Callable[
+            [metadata_service.UpdateExecutionRequest],
+            gca_execution.Execution]:
+        r"""Return a callable for the update execution method over gRPC.
+
+        Updates a stored Execution.
+
+        Returns:
+            Callable[[~.UpdateExecutionRequest],
+                    ~.Execution]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_execution' not in self._stubs:
+            self._stubs['update_execution'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution',
+                request_serializer=metadata_service.UpdateExecutionRequest.serialize,
+                response_deserializer=gca_execution.Execution.deserialize,
+            )
+        return self._stubs['update_execution']
+
+    @property
+    def delete_execution(self) -> Callable[
+            [metadata_service.DeleteExecutionRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the delete execution method over gRPC.
+
+        Deletes an Execution.
+
+        Returns:
+            Callable[[~.DeleteExecutionRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_execution' not in self._stubs:
+            self._stubs['delete_execution'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteExecution',
+                request_serializer=metadata_service.DeleteExecutionRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_execution']
+
+    @property
+    def purge_executions(self) -> Callable[
+            [metadata_service.PurgeExecutionsRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the purge executions method over gRPC.
+
+        Purges Executions.
+
+        Returns:
+            Callable[[~.PurgeExecutionsRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'purge_executions' not in self._stubs:
+            self._stubs['purge_executions'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeExecutions',
+                request_serializer=metadata_service.PurgeExecutionsRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['purge_executions']
+
+    @property
+    def add_execution_events(self) -> Callable[
+            [metadata_service.AddExecutionEventsRequest],
+            metadata_service.AddExecutionEventsResponse]:
+        r"""Return a callable for the add execution events method over gRPC.
+
+        Adds Events to the specified Execution. An Event
+        indicates whether an Artifact was used as an input or
+        output for an Execution. If an Event already exists
+        between the Execution and the Artifact, the Event is
+        skipped.
+
+        Returns:
+            Callable[[~.AddExecutionEventsRequest],
+                    ~.AddExecutionEventsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'add_execution_events' not in self._stubs:
+            self._stubs['add_execution_events'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents',
+                request_serializer=metadata_service.AddExecutionEventsRequest.serialize,
+                response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize,
+            )
+        return self._stubs['add_execution_events']
+
+    @property
+    def query_execution_inputs_and_outputs(self) -> Callable[
+            [metadata_service.QueryExecutionInputsAndOutputsRequest],
+            lineage_subgraph.LineageSubgraph]:
+        r"""Return a callable for the query execution inputs and
+        outputs method over gRPC.
+
+        Obtains the set of input and output Artifacts for
+        this Execution, in the form of LineageSubgraph that also
+        contains the Execution and connecting Events.
+
+        Returns:
+            Callable[[~.QueryExecutionInputsAndOutputsRequest],
+                    ~.LineageSubgraph]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'query_execution_inputs_and_outputs' not in self._stubs:
+            self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs',
+                request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize,
+                response_deserializer=lineage_subgraph.LineageSubgraph.deserialize,
+            )
+        return self._stubs['query_execution_inputs_and_outputs']
+
+    @property
+    def create_metadata_schema(self) -> Callable[
+            [metadata_service.CreateMetadataSchemaRequest],
+            gca_metadata_schema.MetadataSchema]:
+        r"""Return a callable for the create metadata schema method over gRPC.
+
+        Creates a MetadataSchema.
+
+        Returns:
+            Callable[[~.CreateMetadataSchemaRequest],
+                    ~.MetadataSchema]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_metadata_schema' not in self._stubs:
+            self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema',
+                request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize,
+                response_deserializer=gca_metadata_schema.MetadataSchema.deserialize,
+            )
+        return self._stubs['create_metadata_schema']
+
+    @property
+    def get_metadata_schema(self) -> Callable[
+            [metadata_service.GetMetadataSchemaRequest],
+            metadata_schema.MetadataSchema]:
+        r"""Return a callable for the get metadata schema method over gRPC.
+
+        Retrieves a specific MetadataSchema.
+
+        Returns:
+            Callable[[~.GetMetadataSchemaRequest],
+                    ~.MetadataSchema]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_metadata_schema' not in self._stubs:
+            self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema',
+                request_serializer=metadata_service.GetMetadataSchemaRequest.serialize,
+                response_deserializer=metadata_schema.MetadataSchema.deserialize,
+            )
+        return self._stubs['get_metadata_schema']
+
+    @property
+    def list_metadata_schemas(self) -> Callable[
+            [metadata_service.ListMetadataSchemasRequest],
+            metadata_service.ListMetadataSchemasResponse]:
+        r"""Return a callable for the list metadata schemas method over gRPC.
+
+        Lists MetadataSchemas.
+
+        Returns:
+            Callable[[~.ListMetadataSchemasRequest],
+                    ~.ListMetadataSchemasResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_metadata_schemas' not in self._stubs:
+            self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas',
+                request_serializer=metadata_service.ListMetadataSchemasRequest.serialize,
+                response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize,
+            )
+        return self._stubs['list_metadata_schemas']
+
+    @property
+    def query_artifact_lineage_subgraph(self) -> Callable[
+            [metadata_service.QueryArtifactLineageSubgraphRequest],
+            lineage_subgraph.LineageSubgraph]:
+        r"""Return a callable for the query artifact lineage
+        subgraph method over gRPC.
+
+        Retrieves lineage of an Artifact represented through
+        Artifacts and Executions connected by Event edges and
+        returned as a LineageSubgraph.
+
+        Returns:
+            Callable[[~.QueryArtifactLineageSubgraphRequest],
+                    ~.LineageSubgraph]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'query_artifact_lineage_subgraph' not in self._stubs:
+            self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph',
+                request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize,
+                response_deserializer=lineage_subgraph.LineageSubgraph.deserialize,
+            )
+        return self._stubs['query_artifact_lineage_subgraph']
+
+    def close(self):
+        # Close the underlying channel.
+        # NOTE(review): the cached stub dict is not cleared here; presumably
+        # the transport is discarded after close() — confirm before reuse.
+        self.grpc_channel.close()
+
+    @property
+    def delete_operation(
+        self,
+    ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
+        r"""Return a callable for the delete_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # Guard on "wait_operation" — the cache key this property assigns and
+        # returns. (Was "delete_operation", a copy-paste slip: once the
+        # delete_operation stub existed, this branch was skipped and the
+        # final return raised KeyError on first access to wait_operation.)
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+
+        Sets the IAM access control policy on the specified
+        resource. Replaces any existing policy.
+
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "set_iam_policy" not in self._stubs:
+            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
+                "/google.iam.v1.IAMPolicy/SetIamPolicy",
+                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
+                response_deserializer=policy_pb2.Policy.FromString,
+            )
+        return self._stubs["set_iam_policy"]
+
+    @property
+    def get_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the get iam policy method over gRPC.
+
+        Gets the IAM access control policy for a resource.
+        Returns an empty policy if the resource exists and does
+        not have a policy set.
+
+        Returns:
+            Callable[[~.GetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_iam_policy" not in self._stubs:
+            self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
+                "/google.iam.v1.IAMPolicy/GetIamPolicy",
+                request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
+                response_deserializer=policy_pb2.Policy.FromString,
+            )
+        return self._stubs["get_iam_policy"]
+
+    @property
+    def test_iam_permissions(
+        self,
+    ) -> Callable[
+        [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse
+    ]:
+        r"""Return a callable for the test iam permissions method over gRPC.
+
+        Tests the specified permissions against the IAM access control
+        policy for a resource. If the resource does not exist, this will
+        return an empty set of permissions, not a NOT_FOUND error.
+
+        Returns:
+            Callable[[~.TestIamPermissionsRequest],
+                ~.TestIamPermissionsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "test_iam_permissions" not in self._stubs:
+            self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
+                "/google.iam.v1.IAMPolicy/TestIamPermissions",
+                request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
+                response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
+            )
+        return self._stubs["test_iam_permissions"]
+
+    @property
+    def kind(self) -> str:
+        # Transport identifier read by client factories to pick a transport.
+        return "grpc"
+
+
+__all__ = (
+    'MetadataServiceGrpcTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..1110b8635d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py
@@ -0,0 +1,1300 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials   # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc                        # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import artifact
+from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact
+from google.cloud.aiplatform_v1beta1.types import context
+from google.cloud.aiplatform_v1beta1.types import context as gca_context
+from google.cloud.aiplatform_v1beta1.types import execution
+from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
+from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
+from google.cloud.aiplatform_v1beta1.types import metadata_schema
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
+from google.cloud.aiplatform_v1beta1.types import metadata_service
+from google.cloud.aiplatform_v1beta1.types import metadata_store
+from google.cloud.location import locations_pb2 # type: ignore
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2
+from google.longrunning import operations_pb2  # type: ignore  # NOTE(review): duplicate of the import above; harmless but could be deduplicated in the generator
+from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import MetadataServiceGrpcTransport
+
+
+class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport):
+    """gRPC AsyncIO backend transport for MetadataService.
+
+    Service for reading and writing metadata entries.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    # Class-level defaults; __init__ rebinds _stubs per instance.
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+                 host: str = 'aiplatform.googleapis.com',
+                 credentials: ga_credentials.Credentials = None,
+                 credentials_file: Optional[str] = None,
+                 scopes: Optional[Sequence[str]] = None,
+                 channel: aio.Channel = None,
+                 api_mtls_endpoint: str = None,
+                 client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+                 ssl_channel_credentials: grpc.ChannelCredentials = None,
+                 client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+                 quota_project_id=None,
+                 client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+                 always_use_jwt_access: Optional[bool] = False,
+                 ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                # use the credentials which are saved
+                credentials=self._credentials,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Quick check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_metadata_store(self) -> Callable[
+            [metadata_service.CreateMetadataStoreRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the create metadata store method over gRPC.
+
+        Initializes a MetadataStore, including allocation of
+        resources.
+
+        Returns:
+            Callable[[~.CreateMetadataStoreRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Awaitable[metadata_store.MetadataStore]]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + Awaitable[~.MetadataStore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Awaitable[metadata_service.ListMetadataStoresResponse]]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + Awaitable[~.ListMetadataStoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore and all its child + resources (Artifacts, Executions, and Contexts). + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Awaitable[artifact.Artifact]]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Awaitable[metadata_service.ListArtifactsResponse]]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + Awaitable[~.ListArtifactsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. + + Returns: + Callable[[~.UpdateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def delete_artifact(self) -> Callable[ + [metadata_service.DeleteArtifactRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete artifact method over gRPC. + + Deletes an Artifact. + + Returns: + Callable[[~.DeleteArtifactRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_artifact' not in self._stubs: + self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteArtifact', + request_serializer=metadata_service.DeleteArtifactRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_artifact'] + + @property + def purge_artifacts(self) -> Callable[ + [metadata_service.PurgeArtifactsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge artifacts method over gRPC. + + Purges Artifacts. + + Returns: + Callable[[~.PurgeArtifactsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_artifacts' not in self._stubs: + self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeArtifacts', + request_serializer=metadata_service.PurgeArtifactsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_artifacts'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Awaitable[context.Context]]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Awaitable[metadata_service.ListContextsResponse]]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + Awaitable[~.ListContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_context' not in self._stubs: + self._stubs['delete_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_context'] + + @property + def purge_contexts(self) -> Callable[ + [metadata_service.PurgeContextsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge contexts method over gRPC. + + Purges Contexts. + + Returns: + Callable[[~.PurgeContextsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_contexts' not in self._stubs: + self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeContexts', + request_serializer=metadata_service.PurgeContextsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_contexts'] + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_context_artifacts_and_executions' not in self._stubs: + self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs['add_context_artifacts_and_executions'] + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Awaitable[metadata_service.AddContextChildrenResponse]]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + Awaitable[~.AddContextChildrenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'add_context_children' not in self._stubs: + self._stubs['add_context_children'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs['add_context_children'] + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_context_lineage_subgraph' not in self._stubs: + self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_context_lineage_subgraph'] + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. 
+ + Returns: + Callable[[~.CreateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_execution' not in self._stubs: + self._stubs['create_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['create_execution'] + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Awaitable[execution.Execution]]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_execution' not in self._stubs: + self._stubs['get_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs['get_execution'] + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Awaitable[metadata_service.ListExecutionsResponse]]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. 
+ + Returns: + Callable[[~.ListExecutionsRequest], + Awaitable[~.ListExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_executions' not in self._stubs: + self._stubs['list_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs['list_executions'] + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_execution' not in self._stubs: + self._stubs['update_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['update_execution'] + + @property + def delete_execution(self) -> Callable[ + [metadata_service.DeleteExecutionRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete execution method over gRPC. + + Deletes an Execution. 
+ + Returns: + Callable[[~.DeleteExecutionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_execution' not in self._stubs: + self._stubs['delete_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteExecution', + request_serializer=metadata_service.DeleteExecutionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_execution'] + + @property + def purge_executions(self) -> Callable[ + [metadata_service.PurgeExecutionsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the purge executions method over gRPC. + + Purges Executions. + + Returns: + Callable[[~.PurgeExecutionsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'purge_executions' not in self._stubs: + self._stubs['purge_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeExecutions', + request_serializer=metadata_service.PurgeExecutionsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['purge_executions'] + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Awaitable[metadata_service.AddExecutionEventsResponse]]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. 
An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + Awaitable[~.AddExecutionEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_execution_events' not in self._stubs: + self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs['add_execution_events'] + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'query_execution_inputs_and_outputs' not in self._stubs: + self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_execution_inputs_and_outputs'] + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Awaitable[gca_metadata_schema.MetadataSchema]]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_metadata_schema' not in self._stubs: + self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['create_metadata_schema'] + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Awaitable[metadata_schema.MetadataSchema]]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_schema' not in self._stubs: + self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['get_metadata_schema'] + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Awaitable[metadata_service.ListMetadataSchemasResponse]]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + Awaitable[~.ListMetadataSchemasResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_schemas' not in self._stubs: + self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs['list_metadata_schemas'] + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. 
+ + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_artifact_lineage_subgraph' not in self._stubs: + self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_artifact_lineage_subgraph'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'MetadataServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py new file mode 100644 index 0000000000..bb6f4258df --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import MigrationServiceClient +from .async_client import MigrationServiceAsyncClient + +__all__ = ( + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py new file mode 100644 index 0000000000..f5910f4373 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -0,0 +1,1109 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.migration_service import pagers +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport +from .client import MigrationServiceClient + + +class MigrationServiceAsyncClient: + """A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
+ """ + + _client: MigrationServiceClient + + DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT + + annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) + parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + version_path = staticmethod(MigrationServiceClient.version_path) + parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) + common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MigrationServiceClient.common_project_path) + parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) + common_location_path = 
staticmethod(MigrationServiceClient.common_location_path) + parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return MigrationServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> MigrationServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            MigrationServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, MigrationServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the migration service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, ~.MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MigrationServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def search_migratable_resources(self, + request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesAsyncPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + parent (:class:`str`): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.SearchMigratableResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_migratable_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchMigratableResourcesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_migrate_resources(self, + request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, + *, + parent: str = None, + migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]): + The request object. Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + parent (:class:`str`): + Required. The location of the migrated resource will + live in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]`): + Required. The request messages + specifying the resources to migrate. + They must be in the same location as the + destination. Up to 50 resources can be + migrated in one batch. + + This corresponds to the ``migrate_resource_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` + Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, migrate_resource_requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.BatchMigrateResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if migrate_resource_requests: + request.migrate_resource_requests.extend(migrate_resource_requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_migrate_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py new file mode 100644 index 0000000000..07e3a41b6f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -0,0 +1,1357 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.migration_service import pagers +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MigrationServiceGrpcTransport +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +class MigrationServiceClientMeta(type): + """Metaclass for the MigrationService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[MigrationServiceTransport]]
+    _transport_registry["grpc"] = MigrationServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[MigrationServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
+    """A service that migrates resources from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MigrationServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MigrationServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str:
+        """Returns a fully-qualified annotated_dataset string."""
+        return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )
+
+    @staticmethod
+    def parse_annotated_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a annotated_dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def version_path(project: str,model: str,version: str,) -> str:
+        """Returns a fully-qualified version string."""
+        return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )
+
+    @staticmethod
+    def parse_version_path(path: str) -> Dict[str,str]:
+        """Parses a version path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_client_cert not in ("true", "false"):
+            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+        # Figure out the client cert source to use.
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MigrationServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the migration service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MigrationServiceTransport): + # transport is a MigrationServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def search_migratable_resources(self, + request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + parent (str): + Required. The location that the migratable resources + should be searched from. 
It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a migration_service.SearchMigratableResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, migration_service.SearchMigratableResourcesRequest): + request = migration_service.SearchMigratableResourcesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchMigratableResourcesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_migrate_resources(self, + request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, + *, + parent: str = None, + migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]): + The request object. Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + parent (str): + Required. The location of the migrated resource will + live in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): + Required. The request messages + specifying the resources to migrate. + They must be in the same location as the + destination. Up to 50 resources can be + migrated in one batch. + + This corresponds to the ``migrate_resource_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` + Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, migrate_resource_requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a migration_service.BatchMigrateResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, migration_service.BatchMigrateResourcesRequest): + request = migration_service.BatchMigrateResourcesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if migrate_resource_requests is not None: + request.migrate_resource_requests = migrate_resource_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. 
+ rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py new file mode 100644 index 0000000000..83aa01e87c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service + + +class SearchMigratableResourcesPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``migratable_resources`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., migration_service.SearchMigratableResourcesResponse], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: + for page in self.pages: + yield from page.migratable_resources + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchMigratableResourcesAsyncPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: + async def async_generator(): + async for page in self.pages: + for response in page.migratable_resources: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py new file mode 100644 index 0000000000..2176366c5e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MigrationServiceTransport +from .grpc import MigrationServiceGrpcTransport +from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] +_transport_registry['grpc'] = MigrationServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport + +__all__ = ( + 'MigrationServiceTransport', + 'MigrationServiceGrpcTransport', + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py new file mode 100644 index 0000000000..81b9d926d1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class MigrationServiceTransport(abc.ABC): + """Abstract transport class for MigrationService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.search_migratable_resources: gapic_v1.method.wrap_method( + self.search_migratable_resources, + default_timeout=None, + client_info=client_info, + ), + self.batch_migrate_resources: gapic_v1.method.wrap_method( + self.batch_migrate_resources, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Union[ + migration_service.SearchMigratableResourcesResponse, + Awaitable[migration_service.SearchMigratableResourcesResponse] + ]]: + raise NotImplementedError() + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, 
Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'MigrationServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py new 
file mode 100644 index 0000000000..dac37d2658 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py @@ -0,0 +1,522 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO + + +class MigrationServiceGrpcTransport(MigrationServiceTransport): + """gRPC backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + migration_service.SearchMigratableResourcesResponse]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + ~.SearchMigratableResourcesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'MigrationServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..1d02977d5a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -0,0 +1,521 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MigrationServiceGrpcTransport + + +class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): + """gRPC AsyncIO backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. 
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Awaitable[migration_service.SearchMigratableResourcesResponse]]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + Awaitable[~.SearchMigratableResourcesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:  # cache key must match the stub stored below
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py new file mode 100644 index 0000000000..c95c75b700 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import ModelServiceClient +from .async_client import ModelServiceAsyncClient + +__all__ = ( + 'ModelServiceClient', + 'ModelServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py new file mode 100644 index 0000000000..92b8a74d09 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -0,0 +1,2571 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from 
google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .client import ModelServiceClient + + +class ModelServiceAsyncClient: + """A service for managing Vertex AI's machine learning Models.""" + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(ModelServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) + parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) + model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) + parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) + training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = 
staticmethod(ModelServiceClient.parse_common_project_path) + common_location_path = staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, ~.ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def upload_model(self, + request: Union[model_service.UploadModelRequest, dict] = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Uploads a Model artifact into Vertex AI. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_upload_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + parent (:class:`str`): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UploadModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upload_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: Union[model_service.GetModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + name (:class:`str`): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, + also provide the version ID or version alias. Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. The "default" version alias is + created for the first version of the model, and can be + moved to other versions later on. There will be exactly + one default version. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: Union[model_service.ListModelsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists Models in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_models(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]): + The request object. 
Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_versions(self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsAsyncPager: + r"""Lists versions of the specified model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + name (:class:`str`): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelVersionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_versions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelVersionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_model(self, + request: Union[model_service.UpdateModelRequest, dict] = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. 
A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UpdateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_explanation_dataset(self, + request: Union[model_service.UpdateExplanationDatasetRequest, dict] = None, + *, + model: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Incrementally update the dataset used for an examples + model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_explanation_dataset(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExplanationDatasetRequest( + model="model_value", + ) + + # Make the request + operation = client.update_explanation_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest, dict]): + The request object. Request message for + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset]. + model (:class:`str`): + Required. The resource name of the Model to update. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetResponse` + Response message of + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UpdateExplanationDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_explanation_dataset, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model", request.model), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.UpdateExplanationDatasetResponse, + metadata_type=model_service.UpdateExplanationDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: Union[model_service.DeleteModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + name (:class:`str`): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model_version(self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + name (:class:`str`): + Required. The name of the model version to be deleted, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelVersionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_version, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def merge_version_aliases(self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = await client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + name (:class:`str`): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + version_aliases (:class:`Sequence[str]`): + Required. The set of version aliases to merge. 
The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix + to an alias means removing that alias from the version. + ``-`` is NOT counted in the 128 characters. Example: + ``-golden`` means removing the ``golden`` alias from the + version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have + the exactly same order from this MergeVersionAliases + API. 2) Adding and deleting the same alias in the + request is not recommended, and the 2 operations will + be cancelled out. + + This corresponds to the ``version_aliases`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, version_aliases]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.MergeVersionAliasesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if version_aliases: + request.version_aliases.extend(version_aliases) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.merge_version_aliases, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: Union[model_service.ExportModelRequest, dict] = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_export_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + name (:class:`str`): + Required. The resource name of the + Model to export. 
The resource name may + contain version id or version alias to + specify the version, if no version is + specified, the default version will be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_model_evaluation(self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + parent (:class:`str`): + Required. The name of the parent model resource. 
Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (:class:`google.cloud.aiplatform_v1beta1.types.ModelEvaluation`): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ImportModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_model_evaluation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: Union[model_service.GetModelEvaluationRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + name (:class:`str`): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: Union[model_service.ListModelEvaluationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists ModelEvaluations in a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + parent (:class:`str`): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation_slice(self, + request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + name (:class:`str`): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationSliceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation_slice, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluation_slices(self, + request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesAsyncPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + parent (:class:`str`): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationSlicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluation_slices, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationSlicesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example**
+ ::
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+ **YAML Example**
+ ::
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._client._transport.set_iam_policy,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource", request.resource),)),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example**
+ ::
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": ["user:eve@example.com"],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ]
+ }
+ **YAML Example**
+ ::
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ For a description of IAM and its features, see the `IAM
+ developer's
+ guide <https://cloud.google.com/iam/docs>`__.
+ """
+ # Create or coerce a protobuf request object.
+
+ # The request isn't a proto-plus wrapped type,
+ # so it must be constructed via keyword expansion.
+ if isinstance(request, dict):
+ request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method(
+ self._client._transport.get_iam_policy,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource", request.resource),)),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py new file mode 100644 index 0000000000..f2a69ee012 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -0,0 +1,2801 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import 
operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ModelServiceGrpcTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +class ModelServiceClientMeta(type): + """Metaclass for the ModelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] + _transport_registry["grpc"] = ModelServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ModelServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ModelServiceClient(metaclass=ModelServiceClientMeta): + """A service for managing Vertex AI's machine learning Models.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. 
+
+ Returns:
+ ModelServiceTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str,location: str,endpoint: str,) -> str:
+ """Returns a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str,str]:
+ """Parses a endpoint path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str,location: str,model: str,) -> str:
+ """Returns a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str,str]:
+ """Parses a model path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str:
+ """Returns a fully-qualified model_evaluation string."""
+ return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, )
+
+ @staticmethod
+ def parse_model_evaluation_path(path: str) -> Dict[str,str]:
+ """Parses a model_evaluation path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str:
+ """Returns a fully-qualified model_evaluation_slice string."""
+ return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, )
+
+ @staticmethod
+ def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]:
+ """Parses a model_evaluation_slice path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)/slices/(?P<slice>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str:
+ """Returns a fully-qualified training_pipeline string."""
+ return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
+
+ @staticmethod
+ def parse_training_pipeline_path(path: str) -> Dict[str,str]:
+ """Parses a training_pipeline path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str, ) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str, ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder, )
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str,str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str, ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization, )
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str,str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str, ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(project=project, )
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str,str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str, ) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str,str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ModelServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ModelServiceTransport): + # transport is a ModelServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def upload_model(self, + request: Union[model_service.UploadModelRequest, dict] = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Uploads a Model artifact into Vertex AI. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_upload_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + parent (str): + Required. The resource name of the Location into which + to upload the Model. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UploadModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UploadModelRequest): + request = model_service.UploadModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.upload_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: Union[model_service.GetModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, + also provide the version ID or version alias. 
Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. The "default" version alias is + created for the first version of the model, and can be + moved to other versions later on. There will be exactly + one default version. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models(self, + request: Union[model_service.ListModelsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists Models in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_models(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_model_versions(self, + request: Union[model_service.ListModelVersionsRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelVersionsPager: + r"""Lists versions of the specified model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + name (str): + Required. The name of the model to + list versions for. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager: + Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelVersionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelVersionsRequest): + request = model_service.ListModelVersionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_versions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelVersionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_model(self, + request: Union[model_service.UpdateModelRequest, dict] = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, + refers to a version specific update. + 2. model.name without the @ value, e.g. models/123, + refers to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a + model update. + 4. Supported model fields: display_name, description; + supported version-specific fields: + version_description. Labels are supported in both + scenarios. Both the model labels and the version + labels are merged when a model is returned. When + updating labels, if the request is for model-specific + update, model label gets updated. Otherwise, version + labels get updated. + 5. A model name or model version name fields update + mismatch will cause a precondition error. + 6. One request cannot update both the model and the + version fields. You must update them separately. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateModelRequest): + request = model_service.UpdateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_explanation_dataset(self, + request: Union[model_service.UpdateExplanationDatasetRequest, dict] = None, + *, + model: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Incrementally update the dataset used for an examples + model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_explanation_dataset(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExplanationDatasetRequest( + model="model_value", + ) + + # Make the request + operation = client.update_explanation_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest, dict]): + The request object. Request message for + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset]. + model (str): + Required. The resource name of the Model to update. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetResponse` + Response message of + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateExplanationDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateExplanationDatasetRequest): + request = model_service.UpdateExplanationDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_explanation_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model", request.model), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.UpdateExplanationDatasetResponse, + metadata_type=model_service.UpdateExplanationDatasetOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def delete_model(self, + request: Union[model_service.DeleteModelRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelRequest): + request = model_service.DeleteModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_version(self, + request: Union[model_service.DeleteModelVersionRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest, dict]): + The request object. Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + name (str): + Required. The name of the model version to be deleted, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelVersionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelVersionRequest): + request = model_service.DeleteModelVersionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model_version] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def merge_version_aliases(self, + request: Union[model_service.MergeVersionAliasesRequest, dict] = None, + *, + name: str = None, + version_aliases: Sequence[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Merges a set of aliases for a Model version. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest, dict]): + The request object. Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + name (str): + Required. The name of the model version to merge + aliases, with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. 
Add the ``-`` prefix + to an alias means removing that alias from the version. + ``-`` is NOT counted in the 128 characters. Example: + ``-golden`` means removing the ``golden`` alias from the + version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have + the exactly same order from this MergeVersionAliases + API. 2) Adding and deleting the same alias in the + request is not recommended, and the 2 operations will + be cancelled out. + + This corresponds to the ``version_aliases`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, version_aliases]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.MergeVersionAliasesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.MergeVersionAliasesRequest): + request = model_service.MergeVersionAliasesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + if version_aliases is not None: + request.version_aliases = version_aliases + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.merge_version_aliases] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def export_model(self, + request: Union[model_service.ExportModelRequest, dict] = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_export_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + name (str): + Required. 
The resource name of the + Model to export. The resource name may + contain version id or version alias to + specify the version, if no version is + specified, the default version will be + exported. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, model_service.ExportModelRequest): + request = model_service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + def import_model_evaluation(self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (google.cloud.aiplatform_v1beta1.types.ModelEvaluation): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ImportModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ImportModelEvaluationRequest): + request = model_service.ImportModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: Union[model_service.GetModelEvaluationRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + name (str): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationRequest): + request = model_service.GetModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluations(self, + request: Union[model_service.ListModelEvaluationsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists ModelEvaluations in a Model. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. 
+ parent (str): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationsRequest): + request = model_service.ListModelEvaluationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation_slice(self, + request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + name (str): + Required. The name of the ModelEvaluationSlice resource. 
+ Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationSliceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationSliceRequest): + request = model_service.GetModelEvaluationSliceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluation_slices(self, + request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + parent (str): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationSlicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): + request = model_service.ListModelEvaluationSlicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListModelEvaluationSlicesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. 
+ + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. 
If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. 
+ + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py new file mode 100644 index 0000000000..62cef7be58 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelsResponse], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelVersionsPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelVersionsResponse], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelVersionsAsyncPager: + """A pager for iterating through ``list_model_versions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelVersions`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelVersionsResponse]], + request: model_service.ListModelVersionsRequest, + response: model_service.ListModelVersionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelVersionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelVersionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelVersionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluations`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationsResponse], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationSlicesResponse], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: + for page in self.pages: + yield from page.model_evaluation_slices + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesAsyncPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation_slices: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py new file mode 100644 index 0000000000..70c39ff384 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry['grpc'] = ModelServiceGrpcTransport +_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport + +__all__ = ( + 'ModelServiceTransport', + 'ModelServiceGrpcTransport', + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py new file mode 100644 index 0000000000..30aaf91ddf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -0,0 +1,453 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ModelServiceTransport(abc.ABC): + """Abstract transport class for ModelService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + 
always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.upload_model: gapic_v1.method.wrap_method( + self.upload_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_versions: gapic_v1.method.wrap_method( + self.list_model_versions, + default_timeout=None, + client_info=client_info, + ), + self.update_model: gapic_v1.method.wrap_method( + self.update_model, + default_timeout=5.0, + client_info=client_info, + ), + self.update_explanation_dataset: gapic_v1.method.wrap_method( + self.update_explanation_dataset, + default_timeout=None, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model_version: gapic_v1.method.wrap_method( + self.delete_model_version, + default_timeout=None, + client_info=client_info, + ), + 
self.merge_version_aliases: gapic_v1.method.wrap_method( + self.merge_version_aliases, + default_timeout=None, + client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, + default_timeout=5.0, + client_info=client_info, + ), + self.import_model_evaluation: gapic_v1.method.wrap_method( + self.import_model_evaluation, + default_timeout=None, + client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation_slice: gapic_v1.method.wrap_method( + self.get_model_evaluation_slice, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluation_slices: gapic_v1.method.wrap_method( + self.list_model_evaluation_slices, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Union[ + model.Model, + Awaitable[model.Model] + ]]: + raise NotImplementedError() + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Union[ + model_service.ListModelsResponse, + Awaitable[model_service.ListModelsResponse] + ]]: + raise NotImplementedError() + + @property + def list_model_versions(self) -> Callable[ + [model_service.ListModelVersionsRequest], + Union[ + model_service.ListModelVersionsResponse, + Awaitable[model_service.ListModelVersionsResponse] + ]]: + raise NotImplementedError() + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Union[ + gca_model.Model, + Awaitable[gca_model.Model] + ]]: + raise NotImplementedError() + + @property + def update_explanation_dataset(self) -> Callable[ + [model_service.UpdateExplanationDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_model_version(self) -> Callable[ + [model_service.DeleteModelVersionRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def merge_version_aliases(self) -> Callable[ + [model_service.MergeVersionAliasesRequest], + Union[ + model.Model, + Awaitable[model.Model] + 
]]: + raise NotImplementedError() + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_model_evaluation(self) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Union[ + gca_model_evaluation.ModelEvaluation, + Awaitable[gca_model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Union[ + model_evaluation.ModelEvaluation, + Awaitable[model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Union[ + model_service.ListModelEvaluationsResponse, + Awaitable[model_service.ListModelEvaluationsResponse] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Union[ + model_evaluation_slice.ModelEvaluationSlice, + Awaitable[model_evaluation_slice.ModelEvaluationSlice] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + Union[ + model_service.ListModelEvaluationSlicesResponse, + Awaitable[model_service.ListModelEvaluationSlicesResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + 
self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'ModelServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py new file mode 100644 index 0000000000..2dc0be6b25 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -0,0 +1,877 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend 
transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. 
+ + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def list_model_versions(self) -> Callable[ + [model_service.ListModelVersionsRequest], + model_service.ListModelVersionsResponse]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + ~.ListModelVersionsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_versions' not in self._stubs: + self._stubs['list_model_versions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelVersions', + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs['list_model_versions'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def update_explanation_dataset(self) -> Callable[ + [model_service.UpdateExplanationDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the update explanation dataset method over gRPC. + + Incrementally update the dataset used for an examples + model. + + Returns: + Callable[[~.UpdateExplanationDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_explanation_dataset' not in self._stubs: + self._stubs['update_explanation_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UpdateExplanationDataset', + request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_explanation_dataset'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def delete_model_version(self) -> Callable[ + [model_service.DeleteModelVersionRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. 
Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + Returns: + Callable[[~.DeleteModelVersionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model_version' not in self._stubs: + self._stubs['delete_model_version'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModelVersion', + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_version'] + + @property + def merge_version_aliases(self) -> Callable[ + [model_service.MergeVersionAliasesRequest], + model.Model]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'merge_version_aliases' not in self._stubs: + self._stubs['merge_version_aliases'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/MergeVersionAliases', + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['merge_version_aliases'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def import_model_evaluation(self) -> Callable[ + [model_service.ImportModelEvaluationRequest], + gca_model_evaluation.ModelEvaluation]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_model_evaluation' not in self._stubs: + self._stubs['import_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ImportModelEvaluation', + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['import_model_evaluation'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + model_service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + model_evaluation_slice.ModelEvaluationSlice]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + ~.ModelEvaluationSlice]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + model_service.ListModelEvaluationSlicesResponse]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. 
+ + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + ~.ListModelEvaluationSlicesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'ModelServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..8e77bdf3c8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,876 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Awaitable[model_service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def list_model_versions(self) -> Callable[ + [model_service.ListModelVersionsRequest], + Awaitable[model_service.ListModelVersionsResponse]]: + r"""Return a callable for the list model versions method over gRPC. + + Lists versions of the specified model. + + Returns: + Callable[[~.ListModelVersionsRequest], + Awaitable[~.ListModelVersionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_versions' not in self._stubs: + self._stubs['list_model_versions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelVersions', + request_serializer=model_service.ListModelVersionsRequest.serialize, + response_deserializer=model_service.ListModelVersionsResponse.deserialize, + ) + return self._stubs['list_model_versions'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Awaitable[gca_model.Model]]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def update_explanation_dataset(self) -> Callable[ + [model_service.UpdateExplanationDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update explanation dataset method over gRPC. + + Incrementally update the dataset used for an examples + model. + + Returns: + Callable[[~.UpdateExplanationDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_explanation_dataset' not in self._stubs: + self._stubs['update_explanation_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UpdateExplanationDataset', + request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_explanation_dataset'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. 
+ + Returns: + Callable[[~.DeleteModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def delete_model_version(self) -> Callable[ + [model_service.DeleteModelVersionRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model version method over gRPC. + + Deletes a Model version. + + Model version can only be deleted if there are no + [DeployedModels][] created from it. Deleting the only version in + the Model is not allowed. Use + [DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel] + for deleting the Model instead. + + Returns: + Callable[[~.DeleteModelVersionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_model_version' not in self._stubs: + self._stubs['delete_model_version'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModelVersion', + request_serializer=model_service.DeleteModelVersionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_version'] + + @property + def merge_version_aliases(self) -> Callable[ + [model_service.MergeVersionAliasesRequest], + Awaitable[model.Model]]: + r"""Return a callable for the merge version aliases method over gRPC. + + Merges a set of aliases for a Model version. + + Returns: + Callable[[~.MergeVersionAliasesRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'merge_version_aliases' not in self._stubs: + self._stubs['merge_version_aliases'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/MergeVersionAliases', + request_serializer=model_service.MergeVersionAliasesRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['merge_version_aliases'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def import_model_evaluation(self) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Awaitable[gca_model_evaluation.ModelEvaluation]]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_model_evaluation' not in self._stubs: + self._stubs['import_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ImportModelEvaluation', + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['import_model_evaluation'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Awaitable[model_evaluation.ModelEvaluation]]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. 
+ + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Awaitable[model_service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + Awaitable[~.ModelEvaluationSlice]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + Awaitable[model_service.ListModelEvaluationSlicesResponse]]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + Awaitable[~.ListModelEvaluationSlicesResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py new file mode 100644 index 0000000000..88e994d789 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import PipelineServiceClient +from .async_client import PipelineServiceAsyncClient + +__all__ = ( + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py new file mode 100644 index 0000000000..b3db644f88 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -0,0 +1,1953 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base 
import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport +from .client import PipelineServiceClient + + +class PipelineServiceAsyncClient: + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + """ + + _client: PipelineServiceClient + + DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) + endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) + model_path = staticmethod(PipelineServiceClient.model_path) + parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) + training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) + 
common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PipelineServiceClient.common_project_path) + parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) + common_location_path = staticmethod(PipelineServiceClient.common_location_path) + parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. + """ + return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. 
+        """
+        return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return PipelineServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> PipelineServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            PipelineServiceTransport: The transport used by the client instance.
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PipelineServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_training_pipeline(self, + request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + parent (:class:`str`): + Required. The resource name of the Location to create + the TrainingPipeline in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (:class:`google.cloud.aiplatform_v1beta1.types.TrainingPipeline`): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreateTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_training_pipeline(self, + request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = await client.get_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_training_pipelines(self, + request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesAsyncPager: + r"""Lists TrainingPipelines in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + parent (:class:`str`): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListTrainingPipelinesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_training_pipelines, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrainingPipelinesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_training_pipeline(self, + request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeleteTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def cancel_training_pipeline(self, + request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + await client.cancel_training_pipeline(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline to cancel. 
+ Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_pipeline_job(self, + request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1beta1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. 
+ + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_pipeline_job(self, + request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_pipeline_jobs(self, + request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]): + The request object. 
Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_pipeline_job(self, + request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job(self, + request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_pipeline_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]): + The request object. 
Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py new file mode 100644 index 0000000000..dc484f60df --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -0,0 +1,2219 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # 
type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PipelineServiceGrpcTransport +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +class PipelineServiceClientMeta(type): + """Metaclass for the PipelineService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] + _transport_registry["grpc"] = PipelineServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PipelineServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PipelineServiceClient(metaclass=PipelineServiceClientMeta): + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PipelineServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PipelineServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> PipelineServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            PipelineServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str:
+        """Returns a fully-qualified artifact string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
+
+    @staticmethod
+    def parse_artifact_path(path: str) -> Dict[str,str]:
+        """Parses a artifact path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def context_path(project: str,location: str,metadata_store: str,context: str,) -> str:
+        """Returns a fully-qualified context string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
+
+    @staticmethod
+    def parse_context_path(path: str) -> Dict[str,str]:
+        """Parses a context path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def custom_job_path(project: str,location: str,custom_job: str,) -> str:
+        """Returns a fully-qualified custom_job string."""
+        return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+
+    @staticmethod
+    def parse_custom_job_path(path: str) -> Dict[str,str]:
+        """Parses a custom_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
+        """Returns a fully-qualified endpoint string."""
+        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+
+    @staticmethod
+    def parse_endpoint_path(path: str) -> Dict[str,str]:
+        """Parses a endpoint path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str:
+        """Returns a fully-qualified execution string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, )
+
+    @staticmethod
+    def parse_execution_path(path: str) -> Dict[str,str]:
+        """Parses a execution path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def network_path(project: str,network: str,) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(project=project, network=network, )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str,str]:
+        """Parses a network path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str:
+        """Returns a fully-qualified pipeline_job string."""
+        return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, )
+
+    @staticmethod
+    def parse_pipeline_job_path(path: str) -> Dict[str,str]:
+        """Parses a pipeline_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str:
+        """Returns a fully-qualified training_pipeline string."""
+        return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
+
+    @staticmethod
+    def parse_training_pipeline_path(path: str) -> Dict[str,str]:
+        """Parses a training_pipeline path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. 
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PipelineServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PipelineServiceTransport): + # transport is a PipelineServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_training_pipeline(self, + request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]): + The request object. 
Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + parent (str): + Required. The resource name of the Location to create + the TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreateTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): + request = pipeline_service.CreateTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_training_pipeline(self, + request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = client.get_training_pipeline(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + name (str): + Required. 
The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): + request = pipeline_service.GetTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_training_pipelines(self, + request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesPager: + r"""Lists TrainingPipelines in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListTrainingPipelinesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): + request = pipeline_service.ListTrainingPipelinesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListTrainingPipelinesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_training_pipeline(self, + request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TrainingPipeline. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeleteTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): + request = pipeline_service.DeleteTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_training_pipeline(self, + request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + client.cancel_training_pipeline(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. 
+ name (str): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): + request = pipeline_service.CancelTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_pipeline_job(self, + request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. 
+ + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_pipeline_job(self, + request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_pipeline_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_pipeline_jobs(self, + request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job(self, + request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_pipeline_job(self, + request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. 
The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_pipeline_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+                **JSON Example**
+                ::
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+                **YAML Example**
+                ::
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._transport.set_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+                **JSON Example**
+                ::
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+                **YAML Example**
+                ::
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method.wrap_method(
+            self._transport.get_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py new file mode 100644 index 0000000000..20cecd5f27 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import training_pipeline + + +class ListTrainingPipelinesPager: + """A pager for iterating through ``list_training_pipelines`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``training_pipelines`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
class ListTrainingPipelinesAsyncPager:
    """An asynchronous pager over ``list_training_pipelines`` results.

    Thinly wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse`
    object and provides ``__aiter__`` over its ``training_pipelines`` field.
    When more pages exist, additional ``ListTrainingPipelines`` requests are
    issued transparently as iteration proceeds.

    All the usual response attributes are proxied to the pager; if several
    requests are made, only the most recent response is retained and used
    for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]],
            request: pipeline_service.ListTrainingPipelinesRequest,
            response: pipeline_service.ListTrainingPipelinesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so the page token can be advanced without
        # mutating the caller's object.
        self._request = pipeline_service.ListTrainingPipelinesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[training_pipeline.TrainingPipeline]:
        async def _flatten():
            async for page in self.pages:
                for item in page.training_pipelines:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class ListPipelineJobsPager:
    """A synchronous pager over ``list_pipeline_jobs`` results.

    Thinly wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse`
    object and provides ``__iter__`` over its ``pipeline_jobs`` field.
    When more pages exist, additional ``ListPipelineJobs`` requests are
    issued transparently as iteration proceeds.

    All the usual response attributes are proxied to the pager; if several
    requests are made, only the most recent response is retained and used
    for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., pipeline_service.ListPipelineJobsResponse],
            request: pipeline_service.ListPipelineJobsRequest,
            response: pipeline_service.ListPipelineJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so the page token can be advanced without
        # mutating the caller's object.
        self._request = pipeline_service.ListPipelineJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[pipeline_job.PipelineJob]:
        for page in self.pages:
            for job in page.pipeline_jobs:
                yield job

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class ListPipelineJobsAsyncPager:
    """An asynchronous pager over ``list_pipeline_jobs`` results.

    Thinly wraps an initial
    :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse`
    object and provides ``__aiter__`` over its ``pipeline_jobs`` field.
    When more pages exist, additional ``ListPipelineJobs`` requests are
    issued transparently as iteration proceeds.

    All the usual response attributes are proxied to the pager; if several
    requests are made, only the most recent response is retained and used
    for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]],
            request: pipeline_service.ListPipelineJobsRequest,
            response: pipeline_service.ListPipelineJobsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so the page token can be advanced without
        # mutating the caller's object.
        self._request = pipeline_service.ListPipelineJobsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]:
        async def _flatten():
            async for page in self.pages:
                for item in page.pipeline_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
from collections import OrderedDict
from typing import Dict, Type

from .base import PipelineServiceTransport
from .grpc import PipelineServiceGrpcTransport
from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport


# Registry mapping a transport name ('grpc', 'grpc_asyncio') to the class
# implementing it; insertion order fixes the lookup/preference order.
_transport_registry = OrderedDict((
    ('grpc', PipelineServiceGrpcTransport),
    ('grpc_asyncio', PipelineServiceGrpcAsyncIOTransport),
))  # type: Dict[str, Type[PipelineServiceTransport]]

__all__ = (
    'PipelineServiceTransport',
    'PipelineServiceGrpcTransport',
    'PipelineServiceGrpcAsyncIOTransport',
)
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_service
from google.cloud.aiplatform_v1beta1.types import training_pipeline
from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

try:
    # Report the installed library version in the user-agent; fall back to a
    # bare ClientInfo when distribution metadata is unavailable.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class PipelineServiceTransport(abc.ABC):
    """Abstract transport class for PipelineService.

    Concrete subclasses (gRPC, gRPC-asyncio) implement the RPC properties;
    this base class resolves credentials and hostname common to all of them.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the wrapped (timeout/metrics-aware) RPC methods."""
        self._wrapped_methods = {
            self.create_training_pipeline: gapic_v1.method.wrap_method(
                self.create_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.get_training_pipeline: gapic_v1.method.wrap_method(
                self.get_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.list_training_pipelines: gapic_v1.method.wrap_method(
                self.list_training_pipelines,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.delete_training_pipeline: gapic_v1.method.wrap_method(
                self.delete_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.cancel_training_pipeline: gapic_v1.method.wrap_method(
                self.cancel_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.create_pipeline_job: gapic_v1.method.wrap_method(
                self.create_pipeline_job,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_pipeline_job: gapic_v1.method.wrap_method(
                self.get_pipeline_job,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_pipeline_jobs: gapic_v1.method.wrap_method(
                self.list_pipeline_jobs,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_pipeline_job: gapic_v1.method.wrap_method(
                self.delete_pipeline_job,
                default_timeout=None,
                client_info=client_info,
            ),
            self.cancel_pipeline_job: gapic_v1.method.wrap_method(
                self.cancel_pipeline_job,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_training_pipeline(self) -> Callable[
            [pipeline_service.CreateTrainingPipelineRequest],
            Union[
                gca_training_pipeline.TrainingPipeline,
                Awaitable[gca_training_pipeline.TrainingPipeline]
            ]]:
        raise NotImplementedError()

    @property
    def get_training_pipeline(self) -> Callable[
            [pipeline_service.GetTrainingPipelineRequest],
            Union[
                training_pipeline.TrainingPipeline,
                Awaitable[training_pipeline.TrainingPipeline]
            ]]:
        raise NotImplementedError()

    @property
    def list_training_pipelines(self) -> Callable[
            [pipeline_service.ListTrainingPipelinesRequest],
            Union[
                pipeline_service.ListTrainingPipelinesResponse,
                Awaitable[pipeline_service.ListTrainingPipelinesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_training_pipeline(self) -> Callable[
            [pipeline_service.DeleteTrainingPipelineRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_training_pipeline(self) -> Callable[
            [pipeline_service.CancelTrainingPipelineRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def create_pipeline_job(self) -> Callable[
            [pipeline_service.CreatePipelineJobRequest],
            Union[
                gca_pipeline_job.PipelineJob,
                Awaitable[gca_pipeline_job.PipelineJob]
            ]]:
        raise NotImplementedError()

    @property
    def get_pipeline_job(self) -> Callable[
            [pipeline_service.GetPipelineJobRequest],
            Union[
                pipeline_job.PipelineJob,
                Awaitable[pipeline_job.PipelineJob]
            ]]:
        raise NotImplementedError()

    @property
    def list_pipeline_jobs(self) -> Callable[
            [pipeline_service.ListPipelineJobsRequest],
            Union[
                pipeline_service.ListPipelineJobsResponse,
                Awaitable[pipeline_service.ListPipelineJobsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_pipeline_job(self) -> Callable[
            [pipeline_service.DeletePipelineJobRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def cancel_pipeline_job(self) -> Callable[
            [pipeline_service.CancelPipelineJobRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'PipelineServiceTransport',
)
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.aiplatform_v1beta1.types import pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_service
from google.cloud.aiplatform_v1beta1.types import training_pipeline
from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore
from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO


class PipelineServiceGrpcTransport(PipelineServiceTransport):
    """gRPC backend transport for PipelineService.

    A service for creating and managing Vertex AI's pipelines. This
    includes both ``TrainingPipeline`` resources (used for AutoML and
    custom training) and ``PipelineJob`` resources (used for Vertex AI
    Pipelines).

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of lazily-created stub functions, keyed by RPC name.
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'aiplatform.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[grpc.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
                       host: str = 'aiplatform.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    @property
    def create_training_pipeline(self) -> Callable[
            [pipeline_service.CreateTrainingPipelineRequest],
            gca_training_pipeline.TrainingPipeline]:
        r"""Return a callable for the create training pipeline method over gRPC.

        Creates a TrainingPipeline. A created
        TrainingPipeline right away will be attempted to be run.

        Returns:
            Callable[[~.CreateTrainingPipelineRequest],
                    ~.TrainingPipeline]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_training_pipeline' not in self._stubs:
            self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline',
                request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
                response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
            )
        return self._stubs['create_training_pipeline']

    @property
    def get_training_pipeline(self) -> Callable[
            [pipeline_service.GetTrainingPipelineRequest],
            training_pipeline.TrainingPipeline]:
        r"""Return a callable for the get training pipeline method over gRPC.

        Gets a TrainingPipeline.

        Returns:
            Callable[[~.GetTrainingPipelineRequest],
                    ~.TrainingPipeline]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_training_pipeline' not in self._stubs:
            self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline',
                request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
                response_deserializer=training_pipeline.TrainingPipeline.deserialize,
            )
        return self._stubs['get_training_pipeline']

    @property
    def list_training_pipelines(self) -> Callable[
            [pipeline_service.ListTrainingPipelinesRequest],
            pipeline_service.ListTrainingPipelinesResponse]:
        r"""Return a callable for the list training pipelines method over gRPC.

        Lists TrainingPipelines in a Location.

        Returns:
            Callable[[~.ListTrainingPipelinesRequest],
                    ~.ListTrainingPipelinesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_training_pipelines' not in self._stubs:
            self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines',
                request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
                response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
            )
        return self._stubs['list_training_pipelines']

    @property
    def delete_training_pipeline(self) -> Callable[
            [pipeline_service.DeleteTrainingPipelineRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete training pipeline method over gRPC.

        Deletes a TrainingPipeline.

        Returns:
            Callable[[~.DeleteTrainingPipelineRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_training_pipeline' not in self._stubs:
            self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline',
                request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['delete_training_pipeline']

    @property
    def cancel_training_pipeline(self) -> Callable[
            [pipeline_service.CancelTrainingPipelineRequest],
            empty_pb2.Empty]:
        r"""Return a callable for the cancel training pipeline method over gRPC.

        Cancels a TrainingPipeline. Starts asynchronous cancellation on
        the TrainingPipeline. The server makes a best effort to cancel
        the pipeline, but success is not guaranteed. Clients can use
        [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]
        or other methods to check whether the cancellation succeeded or
        whether the pipeline completed despite cancellation. On
        successful cancellation, the TrainingPipeline is not deleted;
        instead it becomes a pipeline with a
        [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error]
        value with a [google.rpc.Status.code][google.rpc.Status.code] of
        1, corresponding to ``Code.CANCELLED``, and
        [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state]
        is set to ``CANCELLED``.

        Returns:
            Callable[[~.CancelTrainingPipelineRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'cancel_training_pipeline' not in self._stubs:
            self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline',
                request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['cancel_training_pipeline']

    @property
    def create_pipeline_job(self) -> Callable[
            [pipeline_service.CreatePipelineJobRequest],
            gca_pipeline_job.PipelineJob]:
        r"""Return a callable for the create pipeline job method over gRPC.

        Creates a PipelineJob. A PipelineJob will run
        immediately when created.

        Returns:
            Callable[[~.CreatePipelineJobRequest],
                    ~.PipelineJob]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_pipeline_job' not in self._stubs:
            self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob',
                request_serializer=pipeline_service.CreatePipelineJobRequest.serialize,
                response_deserializer=gca_pipeline_job.PipelineJob.deserialize,
            )
        return self._stubs['create_pipeline_job']

    @property
    def get_pipeline_job(self) -> Callable[
            [pipeline_service.GetPipelineJobRequest],
            pipeline_job.PipelineJob]:
        r"""Return a callable for the get pipeline job method over gRPC.

        Gets a PipelineJob.

        Returns:
            Callable[[~.GetPipelineJobRequest],
                    ~.PipelineJob]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_pipeline_job' not in self._stubs:
            self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob',
                request_serializer=pipeline_service.GetPipelineJobRequest.serialize,
                response_deserializer=pipeline_job.PipelineJob.deserialize,
            )
        return self._stubs['get_pipeline_job']
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(fix): the cache guard must key on "wait_operation". The
+        # generated code checked "delete_operation", so once that stub
+        # existed this property skipped stub creation and the return
+        # below raised KeyError; otherwise the stub was re-created on
+        # every access instead of being cached.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PipelineServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..bb26301e06 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -0,0 +1,757 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PipelineServiceGrpcTransport + + +class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): + """gRPC AsyncIO backend transport for PipelineService. + + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex AI + Pipelines). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Awaitable[gca_training_pipeline.TrainingPipeline]]: + r"""Return a callable for the create training pipeline method over gRPC. + + Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Returns: + Callable[[~.CreateTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_training_pipeline' not in self._stubs: + self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', + request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, + response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['create_training_pipeline'] + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Awaitable[training_pipeline.TrainingPipeline]]: + r"""Return a callable for the get training pipeline method over gRPC. + + Gets a TrainingPipeline. + + Returns: + Callable[[~.GetTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_training_pipeline' not in self._stubs: + self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', + request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, + response_deserializer=training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['get_training_pipeline'] + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: + r"""Return a callable for the list training pipelines method over gRPC. + + Lists TrainingPipelines in a Location. + + Returns: + Callable[[~.ListTrainingPipelinesRequest], + Awaitable[~.ListTrainingPipelinesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob]]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Awaitable[pipeline_job.PipelineJob]]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse]]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py new file mode 100644 index 0000000000..7c079c80cc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py new file mode 100644 index 0000000000..d43d09cdbb --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -0,0 +1,1316 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """A service for online predictions and explanations.""" + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + model_path = staticmethod(PredictionServiceClient.model_path) + parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) + common_billing_account_path = 
staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. 
+        """
+        return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> PredictionServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            PredictionServiceTransport: The transport used by the client instance.
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: Union[prediction_service.PredictRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + prediction call. 
A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def raw_predict(self, + request: Union[prediction_service.RawPredictRequest, dict] = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_raw_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = await client.raw_predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (:class:`google.api.httpbody_pb2.HttpBody`): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This + schema applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + use the ``RawPredict`` method. 
+ + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.RawPredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.raw_predict, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def explain(self, + request: Union[prediction_service.ExplainRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. 
If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_explain(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.explain(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]): + The request object. Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. 
The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.ExplainRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.explain, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py new file mode 100644 index 0000000000..371ff09444 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -0,0 +1,1519 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PredictionServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + + class PredictionServiceClient(metaclass=PredictionServiceClientMeta): + """A service for online predictions and explanations.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a 
fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PredictionServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def predict(self, + request: Union[prediction_service.PredictRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances.extend(instances) + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def raw_predict(self, + request: Union[prediction_service.RawPredictRequest, dict] = None, + *, + endpoint: str = None, + http_body: httpbody_pb2.HttpBody = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> httpbody_pb2.HttpBody: + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_raw_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = client.raw_predict(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): + The request object. Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. Supports HTTP headers and + arbitrary data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. 
When this limit it is exceeded for + an AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for + a custom-trained model, the behavior varies depending on + the model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This + schema applies when you deploy the ``Model`` as a + ``DeployedModel`` to an + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + use the ``RawPredict`` method. + + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. 
+ google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.RawPredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.RawPredictRequest): + request = prediction_service.RawPredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.raw_predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def explain(self, + request: Union[prediction_service.ExplainRequest, dict] = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_explain(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.explain(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]): + The request object. Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the explanation. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.ExplainRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.ExplainRequest): + request = prediction_service.ExplainRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances.extend(instances) + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.explain] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. 
+ + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. 
If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. 
+ + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. 
+ response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.locations_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.locations_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.locations_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.locations_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response.
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py new file mode 100644 index 0000000000..bb65c664b5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py new file mode 100644 index 0000000000..e912ab6815 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, + default_timeout=5.0, + client_info=client_info, + ), + self.raw_predict: gapic_v1.method.wrap_method( + self.raw_predict, + default_timeout=None, + client_info=client_info, + ), + self.explain: gapic_v1.method.wrap_method( + self.explain, + default_timeout=5.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Union[ + prediction_service.PredictResponse, + Awaitable[prediction_service.PredictResponse] + ]]: + raise NotImplementedError() + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + Union[ + httpbody_pb2.HttpBody, + Awaitable[httpbody_pb2.HttpBody] + ]]: + raise NotImplementedError() + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + Union[ + prediction_service.ExplainResponse, + Awaitable[prediction_service.ExplainResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'PredictionServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py new file mode 100644 index 0000000000..15c3c76692 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -0,0 +1,545 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + A service for online predictions and explanations. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. 
+ """ + return self._grpc_channel + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + prediction_service.PredictResponse]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. + + Returns: + Callable[[~.PredictRequest], + ~.PredictResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + httpbody_pb2.HttpBody]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. + + Returns: + Callable[[~.RawPredictRequest], + ~.HttpBody]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'raw_predict' not in self._stubs: + self._stubs['raw_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict', + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs['raw_predict'] + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + prediction_service.ExplainResponse]: + r"""Return a callable for the explain method over gRPC. + + Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Returns: + Callable[[~.ExplainRequest], + ~.ExplainResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'explain' not in self._stubs: + self._stubs['explain'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', + request_serializer=prediction_service.ExplainRequest.serialize, + response_deserializer=prediction_service.ExplainResponse.deserialize, + ) + return self._stubs['explain'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..ae0ceab5ed --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Awaitable[prediction_service.PredictResponse]]: + r"""Return a callable for the predict method over gRPC. + + Perform an online prediction. + + Returns: + Callable[[~.PredictRequest], + Awaitable[~.PredictResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + @property + def raw_predict(self) -> Callable[ + [prediction_service.RawPredictRequest], + Awaitable[httpbody_pb2.HttpBody]]: + r"""Return a callable for the raw predict method over gRPC. + + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. 
+ + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. + + Returns: + Callable[[~.RawPredictRequest], + Awaitable[~.HttpBody]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'raw_predict' not in self._stubs: + self._stubs['raw_predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict', + request_serializer=prediction_service.RawPredictRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs['raw_predict'] + + @property + def explain(self) -> Callable[ + [prediction_service.ExplainRequest], + Awaitable[prediction_service.ExplainResponse]]: + r"""Return a callable for the explain method over gRPC. + + Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Returns: + Callable[[~.ExplainRequest], + Awaitable[~.ExplainResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'explain' not in self._stubs: + self._stubs['explain'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', + request_serializer=prediction_service.ExplainRequest.serialize, + response_deserializer=prediction_service.ExplainResponse.deserialize, + ) + return self._stubs['explain'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. 
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_operation" not in self._stubs:
+            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/GetOperation",
+                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs["get_operation"]
+
+    @property
+    def list_operations(
+        self,
+    ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]:
+        r"""Return a callable for the list_operations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_operations" not in self._stubs:
+            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/ListOperations",
+                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
+                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
+            )
+        return self._stubs["list_operations"]
+
+    @property
+    def list_locations(
+        self,
+    ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]:
+        r"""Return a callable for the list locations method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py new file mode 100644 index 0000000000..9fb268e877 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SpecialistPoolServiceClient +from .async_client import SpecialistPoolServiceAsyncClient + +__all__ = ( + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py new file mode 100644 index 0000000000..8943482a6c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -0,0 +1,1452 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport +from .client import SpecialistPoolServiceClient + + +class SpecialistPoolServiceAsyncClient: + """A service for creating and managing Customer SpecialistPools. 
+ When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + _client: SpecialistPoolServiceClient + + DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT + + specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) + parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) + common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) + common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) + parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) + common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) + parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceAsyncClient: The constructed client.
+        """
+        return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceAsyncClient: The constructed client.
+        """
+        return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return SpecialistPoolServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> SpecialistPoolServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SpecialistPoolServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.SpecialistPoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = SpecialistPoolServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_specialist_pool(self, + request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + parent (:class:`str`): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_specialist_pool(self, + request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + name (:class:`str`): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.GetSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_specialist_pools(self, + request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsAsyncPager: + r"""Lists SpecialistPools in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + parent (:class:`str`): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_specialist_pools, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSpecialistPoolsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_specialist_pool(self, + request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. + name (:class:`str`): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_specialist_pool(self, + request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): + Required. The SpecialistPool which + replaces the resource on the server. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
                A
                ``binding`` binds one or more ``members`` to a single
                ``role``. Members can be user accounts, service
                accounts, Google groups, and domains (such as G Suite).
                A ``role`` is a named list of permissions (defined by
                IAM or configured by users). A ``binding`` can
                optionally specify a ``condition``, which is a logic
                expression that further constrains the role binding
                based on attributes about the request and/or target
                resource.

                **JSON Example**::

                    {
                      "bindings": [
                        {
                          "role": "roles/resourcemanager.organizationAdmin",
                          "members": [
                            "user:mike@example.com",
                            "group:admins@example.com",
                            "domain:google.com",
                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                          ]
                        },
                        {
                          "role": "roles/resourcemanager.organizationViewer",
                          "members": ["user:eve@example.com"],
                          "condition": {
                            "title": "expirable access",
                            "description": "Does not grant access after Sep 2020",
                            "expression": "request.time <
                            timestamp('2020-10-01T00:00:00.000Z')",
                          }
                        }
                      ]
                    }

                **YAML Example**::

                    bindings:
                    - members:
                      - user:mike@example.com
                      - group:admins@example.com
                      - domain:google.com
                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
                      role: roles/resourcemanager.organizationAdmin
                    - members:
                      - user:eve@example.com
                      role: roles/resourcemanager.organizationViewer
                      condition:
                        title: expirable access
                        description: Does not grant access after Sep 2020
                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')

                For a description of IAM and its features, see the `IAM
                developer's guide <https://cloud.google.com/iam/docs>`__.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.SetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
                **JSON Example**::

                    {
                      "bindings": [
                        {
                          "role": "roles/resourcemanager.organizationAdmin",
                          "members": [
                            "user:mike@example.com",
                            "group:admins@example.com",
                            "domain:google.com",
                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                          ]
                        },
                        {
                          "role": "roles/resourcemanager.organizationViewer",
                          "members": ["user:eve@example.com"],
                          "condition": {
                            "title": "expirable access",
                            "description": "Does not grant access after Sep 2020",
                            "expression": "request.time <
                            timestamp('2020-10-01T00:00:00.000Z')",
                          }
                        }
                      ]
                    }

                **YAML Example**::

                    bindings:
                    - members:
                      - user:mike@example.com
                      - group:admins@example.com
                      - domain:google.com
                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
                      role: roles/resourcemanager.organizationAdmin
                    - members:
                      - user:eve@example.com
                      role: roles/resourcemanager.organizationViewer
                      condition:
                        title: expirable access
                        description: Does not grant access after Sep 2020
                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')

                For a description of IAM and its features, see the `IAM
                developer's guide <https://cloud.google.com/iam/docs>`__.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.GetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.get_iam_policy,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource", request.resource),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 

        Args:
            request (:class:`~.location_pb2.ListLocationsRequest`):
                The request object. Request message for
                `ListLocations` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.location_pb2.ListLocationsResponse:
                Response message for ``ListLocations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.ListLocationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method.wrap_method(
            self._client._transport.list_locations,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def __aenter__(self):
        # Entering ``async with client:`` simply yields the client itself.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Leaving the block closes the underlying transport (and channel).
        await self.transport.close()

try:
    # Report the installed library version in request headers; fall back to
    # an unversioned ClientInfo if the distribution is not installed.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "SpecialistPoolServiceAsyncClient",
)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
new file mode 100644
index 0000000000..4d27ffbecc
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
@@ -0,0 +1,1646 @@
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SpecialistPoolServiceGrpcTransport +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +class 
SpecialistPoolServiceClientMeta(type): + """Metaclass for the SpecialistPoolService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] + _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport + _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SpecialistPoolServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SpecialistPoolServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + SpecialistPoolServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: + """Returns a fully-qualified specialist_pool string.""" + return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + + @staticmethod + def parse_specialist_pool_path(path: str) -> Dict[str,str]: + """Parses a specialist_pool path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m 
else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SpecialistPoolServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SpecialistPoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. 
If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpecialistPoolServiceTransport): + # transport is a SpecialistPoolServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_specialist_pool(self, + request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]): + The request object. 
Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + parent (str): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.CreateSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + def get_specialist_pool(self, + request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + name (str): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers and workers. + Managers are responsible for managing + the workers in this pool as well as + customers' data labeling jobs associated + with this pool. Customers create + specialist pool as well as start data + labeling jobs on Cloud, managers and + workers handle the jobs using + CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.GetSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): + request = specialist_pool_service.GetSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_specialist_pools(self, + request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsPager: + r"""Lists SpecialistPools in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_specialist_pools(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListSpecialistPoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_specialist_pools(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + parent (str): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.ListSpecialistPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSpecialistPoolsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_specialist_pool(self, + request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. + name (str): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.DeleteSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def update_specialist_pool(self, + request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a SpecialistPool. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateSpecialistPoolRequest( + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.update_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): + Required. The SpecialistPool which + replaces the resource on the server. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for + managing the workers in this pool as well as + customers' data labeling jobs associated with this + pool. Customers create specialist pool as well as + start data labeling jobs on Cloud, managers and + workers handle the jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.UpdateSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
        return response

    def get_location(
        self,
        request: locations_pb2.GetLocationRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.Location:
        r"""Gets information about a location.

        Args:
            request (:class:`~.locations_pb2.GetLocationRequest`):
                The request object. Request message for
                `GetLocation` method.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.locations_pb2.Location:
                Location object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.GetLocationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # NOTE: no default timeout/retry is configured here; only the
        # per-call ``retry``/``timeout`` arguments apply.
        rpc = gapic_v1.method.wrap_method(
            self._transport.get_location,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.  The ``name`` routing header lets the backend
        # route the request to the right location.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("name", request.name),)),
        )

        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    def list_locations(
        self,
        request: locations_pb2.ListLocationsRequest = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> locations_pb2.ListLocationsResponse:
        r"""Lists information about the supported locations for this service.

        Args:
            request (:class:`~.locations_pb2.ListLocationsRequest`):
                The request object.
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py new file mode 100644 index 0000000000..6915cd6ea3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service + + +class ListSpecialistPoolsPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``specialist_pools`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: + for page in self.pages: + yield from page.specialist_pools + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSpecialistPoolsAsyncPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: + async def async_generator(): + async for page in self.pages: + for response in page.specialist_pools: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py new file mode 100644 index 0000000000..8e501c9548 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import SpecialistPoolServiceTransport
from .grpc import SpecialistPoolServiceGrpcTransport
from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport


# Compile a registry of transports.
# Maps the transport name (as passed via the client's ``transport`` argument)
# to the concrete transport class.  OrderedDict preserves registration
# order, so 'grpc' is the first (default) entry.
_transport_registry = OrderedDict()  # type: Dict[str, Type[SpecialistPoolServiceTransport]]
_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport
_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport

__all__ = (
    'SpecialistPoolServiceTransport',
    'SpecialistPoolServiceGrpcTransport',
    'SpecialistPoolServiceGrpcAsyncIOTransport',
)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py
new file mode 100644
index 0000000000..921c18b5c5
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py
@@ -0,0 +1,309 @@
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2
# NOTE(review): the generator emits this import twice; the duplicate below
# is harmless but could be dropped.
from google.longrunning import operations_pb2  # type: ignore

try:
    # Report the installed library version in the user-agent header; fall
    # back to a blank ClientInfo when running from source without an
    # installed distribution.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class SpecialistPoolServiceTransport(abc.ABC):
    """Abstract transport class for SpecialistPoolService."""

    # OAuth scopes requested when credentials are resolved from the
    # environment with no explicit scopes.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # ``default_scopes`` lets google.auth fall back to AUTH_SCOPES when
        # the caller did not supply explicit scopes.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        # Credential precedence: an explicit credentials object, then a
        # credentials file, then application default credentials.  Supplying
        # both an object and a file is ambiguous and rejected outright.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr check guards against older google-auth releases that
        # predate ``with_always_use_jwt_access``.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Each RPC is wrapped once with its default timeout up front so that
        # per-call retry/timeout handling does not re-wrap on every
        # invocation.
        self._wrapped_methods = {
            self.create_specialist_pool: gapic_v1.method.wrap_method(
                self.create_specialist_pool,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.get_specialist_pool: gapic_v1.method.wrap_method(
                self.get_specialist_pool,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.list_specialist_pools: gapic_v1.method.wrap_method(
                self.list_specialist_pools,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.delete_specialist_pool: gapic_v1.method.wrap_method(
                self.delete_specialist_pool,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.update_specialist_pool: gapic_v1.method.wrap_method(
                self.update_specialist_pool,
                default_timeout=5.0,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Union[ + specialist_pool.SpecialistPool, + Awaitable[specialist_pool.SpecialistPool] + ]]: + raise NotImplementedError() + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Union[ + specialist_pool_service.ListSpecialistPoolsResponse, + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + 
def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'SpecialistPoolServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py new file mode 100644 index 0000000000..157ae73704 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -0,0 +1,601 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO + + +class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): + """gRPC backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. 
Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. 
The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + specialist_pool.SpecialistPool]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + ~.SpecialistPool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + specialist_pool_service.ListSpecialistPoolsResponse]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + ~.ListSpecialistPoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'SpecialistPoolServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..d5716ba98d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -0,0 +1,600 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import SpecialistPoolServiceGrpcTransport + + +class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): + """gRPC AsyncIO backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Awaitable[specialist_pool.SpecialistPool]]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + Awaitable[~.SpecialistPool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + Awaitable[~.ListSpecialistPoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. 
+ + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py new file mode 100644 index 0000000000..233bb9e47a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import TensorboardServiceClient +from .async_client import TensorboardServiceAsyncClient + +__all__ = ( + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py new file mode 100644 index 0000000000..0c24b8a218 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -0,0 +1,3998 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from 
google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport +from .client import TensorboardServiceClient + + +class TensorboardServiceAsyncClient: + """TensorboardService""" + + _client: TensorboardServiceClient + + DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT + + tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) + tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) + parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) + tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) + parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) + tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) + parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) + common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) + parse_common_organization_path = 
staticmethod(TensorboardServiceClient.parse_common_organization_path) + common_project_path = staticmethod(TensorboardServiceClient.common_project_path) + parse_common_project_path = staticmethod(TensorboardServiceClient.parse_common_project_path) + common_location_path = staticmethod(TensorboardServiceClient.common_location_path) + parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return TensorboardServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> TensorboardServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            TensorboardServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the tensorboard service client.
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = TensorboardServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_tensorboard(self, + request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard(self, + request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. 
If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_tensorboard(self, + request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a Tensorboard. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_tensorboards(self, + request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsAsyncPager: + r"""Lists Tensorboards in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (:class:`str`): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboards, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard(self, + request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_tensorboard_experiment(self, + request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = await client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ tensorboard_experiment_id (:class:`str`): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_experiment(self, + request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_tensorboard_experiment(self, + request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = await client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. 
The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_experiments(self, + request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsAsyncPager: + r"""Lists TensorboardExperiments in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (:class:`str`): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_experiments, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardExperimentsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard_experiment(self, + request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def create_tensorboard_run(self, + request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = await client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ tensorboard_run_id (:class:`str`): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def batch_create_tensorboard_runs(self, + request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. 
+ parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]`): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_run(self, + request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun resource. 
+ Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_tensorboard_run(self, + request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = await client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. 
The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run.name", request.tensorboard_run.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_runs(self, + request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsAsyncPager: + r"""Lists TensorboardRuns in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + parent (:class:`str`): + Required. The resource name of the + TensorboardExperiment to list + TensorboardRuns. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardRunsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_runs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardRunsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard_run(self, + request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def batch_create_tensorboard_time_series(self, + request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: + r"""Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest() + requests.parent = "parent_value" + requests.tensorboard_time_series.display_name = "display_name_value" + requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = await client.batch_create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the TensorboardExperiment + to create the TensorboardTimeSeries in. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in + the CreateTensorboardTimeSeriesRequest messages must be + sub resources of this TensorboardExperiment. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]`): + Required. The request message + specifying the TensorboardTimeSeries to + create. A maximum of 1000 + TensorboardTimeSeries can be created in + a batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse: + Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_tensorboard_time_series(self, + request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Creates a TensorboardTimeSeries. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the TensorboardRun to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): + Required. The TensorboardTimeSeries + to create. + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_time_series(self, + request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + name (:class:`str`): + Required. The name of the TensorboardTimeSeries + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to times + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_tensorboard_time_series(self, + request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_update_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest( + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.update_tensorboard_time_series(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): + Required. The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. 
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to time + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series.name", request.tensorboard_time_series.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_tensorboard_time_series(self, + request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesAsyncPager: + r"""Lists TensorboardTimeSeries in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardTimeSeriesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_tensorboard_time_series(self, + request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardTimeSeries. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + name (:class:`str`): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def batch_read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: + r"""Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_batch_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest( + tensorboard="tensorboard_value", + time_series=['time_series_value_1', 'time_series_value_2'], + ) + + # Make the request + response = await client.batch_read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + tensorboard (:class:`str`): + Required. The resource name of the Tensorboard + containing TensorboardTimeSeries to read data from. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. 
+ The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard", request.tensorboard), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = await client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to read data from. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_blob_data(self, + request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, + *, + time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]: + r"""Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = await client.read_tensorboard_blob_data(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. 
Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_blob_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("time_series", request.time_series), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def write_tensorboard_experiment_data(self, + request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = await client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. 
+ tensorboard_experiment (:class:`str`): + Required. The resource name of the TensorboardExperiment + to write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]`): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests: + request.write_run_data_requests.extend(write_run_data_requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_experiment_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment", request.tensorboard_experiment), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def write_tensorboard_run_data(self, + request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1beta1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = await client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (:class:`str`): + Required. The resource name of the TensorboardRun to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series_data (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]`): + Required. The TensorboardTimeSeries + data to write. Values within a time + series are indexed by their step value. + Repeated writes to the same step will + overwrite the existing value for that + step. + The upper limit of data points per write + request is 5000. + + This corresponds to the ``time_series_data`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: + Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data: + request.time_series_data.extend(time_series_data) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.write_tensorboard_run_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run", request.tensorboard_run), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def export_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py new file mode 100644 index 0000000000..3ea3149740 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -0,0 +1,4219 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import 
tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import TensorboardServiceGrpcTransport +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +class TensorboardServiceClientMeta(type): + """Metaclass for the TensorboardService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] + _transport_registry["grpc"] = TensorboardServiceGrpcTransport + _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TensorboardServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): + """TensorboardService""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+ + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: + """Returns a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str,str]: + """Parses a tensorboard path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_experiment_path(project: str,location: str,tensorboard: str,experiment: str,) -> str: + """Returns a fully-qualified tensorboard_experiment string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) + + @staticmethod + def parse_tensorboard_experiment_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_experiment path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_run_path(project: str,location: str,tensorboard: str,experiment: str,run: str,) -> str: + """Returns a fully-qualified tensorboard_run string.""" + return 
"projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) + + @staticmethod + def parse_tensorboard_run_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_run path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_time_series_path(project: str,location: str,tensorboard: str,experiment: str,run: str,time_series: str,) -> str: + """Returns a fully-qualified tensorboard_time_series string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + + @staticmethod + def parse_tensorboard_time_series_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_time_series path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def 
parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. 
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TensorboardServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the tensorboard service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TensorboardServiceTransport): + # transport is a TensorboardServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_tensorboard(self, + request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (str): + Required. The resource name of the Location to create + the Tensorboard in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRequest): + request = tensorboard_service.CreateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def get_tensorboard(self, + request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (str): + Required. The name of the Tensorboard resource. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRequest): + request = tensorboard_service.GetTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard(self, + request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a Tensorboard. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRequest( + tensorboard=tensorboard, + ) + + # Make the request + operation = client.update_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): + request = tensorboard_service.UpdateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def list_tensorboards(self, + request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsPager: + r"""Lists Tensorboards in a Location. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (str): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardsRequest): + request = tensorboard_service.ListTensorboardsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard(self, + request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Tensorboard. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (str): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): + request = tensorboard_service.DeleteTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def create_tensorboard_experiment(self, + request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTensorboardExperimentRequest( + parent="parent_value", + tensorboard_experiment_id="tensorboard_experiment_id_value", + ) + + # Make the request + response = client.create_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (str): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_experiment(self, + request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment + resource. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): + request = tensorboard_service.GetTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_experiment(self, + request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest( + ) + + # Make the request + response = client.update_tensorboard_experiment(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tensorboard_experiments(self, + request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsPager: + r"""Lists TensorboardExperiments in a Location. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (str): + Required. 
The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardExperimentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardExperimentsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_experiment(self, + request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardExperiment. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_run(self, + request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRunRequest( + parent="parent_value", + tensorboard_run=tensorboard_run, + tensorboard_run_id="tensorboard_run_id_value", + ) + + # Make the request + response = client.create_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): + request = tensorboard_service.CreateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def batch_create_tensorboard_runs(self, + request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, + *, + parent: str = None, + requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: + r"""Batch create TensorboardRuns. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_batch_create_tensorboard_runs(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateTensorboardRunRequest() + requests.parent = "parent_value" + requests.tensorboard_run.display_name = "display_name_value" + requests.tensorboard_run_id = "tensorboard_run_id_value" + + request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + response = client.batch_create_tensorboard_runs(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]): + The request object. Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + parent (str): + Required. The resource name of the TensorboardExperiment + to create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest + messages must match this field. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): + Required. The request message + specifying the TensorboardRuns to + create. A maximum of 1000 + TensorboardRuns can be created in a + batch. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: + Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchCreateTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.BatchCreateTensorboardRunsRequest): + request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if requests is not None: + request.requests = requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_runs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_run(self, + request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (str): + Required. The name of the TensorboardRun resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): + request = tensorboard_service.GetTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_tensorboard_run(self, + request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_update_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_run = aiplatform_v1beta1.TensorboardRun() + tensorboard_run.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateTensorboardRunRequest( + tensorboard_run=tensorboard_run, + ) + + # Make the request + response = client.update_tensorboard_run(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. 
A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): + request = tensorboard_service.UpdateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
def list_tensorboard_runs(self,
        request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None,
        *,
        parent: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListTensorboardRunsPager:
    r"""Lists TensorboardRuns in a Location.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_list_tensorboard_runs():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.ListTensorboardRunsRequest(
                parent="parent_value",
            )

            # Make the request
            page_result = client.list_tensorboard_runs(request=request)

            # Handle the response
            for response in page_result:
                print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]):
            The request object. Request message for
            [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
        parent (str):
            Required. The resource name of the
            TensorboardExperiment to list
            TensorboardRuns. Format:
            'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}'

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager:
            Response message for
            [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].

            Iterating over this object will yield results and
            resolve additional pages automatically.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.ListTensorboardRunsRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest):
        request = tensorboard_service.ListTensorboardRunsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # This method is paged; wrap the response in a pager, which provides
    # an `__iter__` convenience method.
    response = pagers.ListTensorboardRunsPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def delete_tensorboard_run(self,
        request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> gac_operation.Operation:
    r"""Deletes a TensorboardRun.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_delete_tensorboard_run():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.DeleteTensorboardRunRequest(
                name="name_value",
            )

            # Make the request
            operation = client.delete_tensorboard_run(request=request)

            print("Waiting for operation to complete...")

            response = operation.result()

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]):
            The request object. Request message for
            [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun].
        name (str):
            Required. The name of the TensorboardRun to be deleted.
            Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.

            The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
            empty messages in your APIs. A typical example is to
            use it as the request or the response type of an API
            method. For instance:

               service Foo {
                 rpc Bar(google.protobuf.Empty) returns
                 (google.protobuf.Empty);

               }

            The JSON representation for Empty is empty JSON
            object {}.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.DeleteTensorboardRunRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest):
        request = tensorboard_service.DeleteTensorboardRunRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("name", request.name),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap the response in an operation future.
    response = gac_operation.from_gapic(
        response,
        self._transport.operations_client,
        empty_pb2.Empty,
        metadata_type=gca_operation.DeleteOperationMetadata,
    )

    # Done; return the response.
    return response
def batch_create_tensorboard_time_series(self,
        request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None,
        *,
        parent: str = None,
        requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse:
    r"""Batch create TensorboardTimeSeries that belong to a
    TensorboardExperiment.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_batch_create_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
            requests.parent = "parent_value"
            requests.tensorboard_time_series.display_name = "display_name_value"
            requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"

            request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
                parent="parent_value",
                requests=requests,
            )

            # Make the request
            response = client.batch_create_tensorboard_time_series(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].
        parent (str):
            Required. The resource name of the TensorboardExperiment
            to create the TensorboardTimeSeries in. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
            The TensorboardRuns referenced by the parent fields in
            the CreateTensorboardTimeSeriesRequest messages must be
            sub resources of this TensorboardExperiment.

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]):
            Required. The request message
            specifying the TensorboardTimeSeries to
            create. A maximum of 1000
            TensorboardTimeSeries can be created in
            a batch.

            This corresponds to the ``requests`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse:
            Response message for
            [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries].

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent, requests])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest):
        request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if requests is not None:
            request.requests = requests

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def create_tensorboard_time_series(self,
        request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None,
        *,
        parent: str = None,
        tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> gca_tensorboard_time_series.TensorboardTimeSeries:
    r"""Creates a TensorboardTimeSeries.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_create_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
            tensorboard_time_series.display_name = "display_name_value"
            tensorboard_time_series.value_type = "BLOB_SEQUENCE"

            request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
                parent="parent_value",
                tensorboard_time_series=tensorboard_time_series,
            )

            # Make the request
            response = client.create_tensorboard_time_series(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
        parent (str):
            Required. The resource name of the TensorboardRun to
            create the TensorboardTimeSeries in. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries):
            Required. The TensorboardTimeSeries
            to create.

            This corresponds to the ``tensorboard_time_series`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
            TensorboardTimeSeries maps to time
            series produced in training runs

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent, tensorboard_time_series])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.CreateTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest):
        request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if tensorboard_time_series is not None:
            request.tensorboard_time_series = tensorboard_time_series

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def get_tensorboard_time_series(self,
        request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_time_series.TensorboardTimeSeries:
    r"""Gets a TensorboardTimeSeries.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_get_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(
                name="name_value",
            )

            # Make the request
            response = client.get_tensorboard_time_series(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries].
        name (str):
            Required. The name of the TensorboardTimeSeries
            resource. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
            TensorboardTimeSeries maps to time
            series produced in training runs

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.GetTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest):
        request = tensorboard_service.GetTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("name", request.name),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def update_tensorboard_time_series(self,
        request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None,
        *,
        tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None,
        update_mask: field_mask_pb2.FieldMask = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> gca_tensorboard_time_series.TensorboardTimeSeries:
    r"""Updates a TensorboardTimeSeries.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_update_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
            tensorboard_time_series.display_name = "display_name_value"
            tensorboard_time_series.value_type = "BLOB_SEQUENCE"

            request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest(
                tensorboard_time_series=tensorboard_time_series,
            )

            # Make the request
            response = client.update_tensorboard_time_series(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries].
        tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries):
            Required. The TensorboardTimeSeries' ``name`` field is
            used to identify the TensorboardTimeSeries to be
            updated. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``

            This corresponds to the ``tensorboard_time_series`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Field mask is used to specify the fields to be
            overwritten in the TensorboardTimeSeries resource by the
            update. The fields specified in the update_mask are
            relative to the resource, not the full request. A field
            will be overwritten if it is in the mask. If the user
            does not provide a mask then all fields will be
            overwritten if new values are specified.

            This corresponds to the ``update_mask`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
            TensorboardTimeSeries maps to time
            series produced in training runs

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([tensorboard_time_series, update_mask])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest):
        request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if tensorboard_time_series is not None:
            request.tensorboard_time_series = tensorboard_time_series
        if update_mask is not None:
            request.update_mask = update_mask

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("tensorboard_time_series.name", request.tensorboard_time_series.name),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def list_tensorboard_time_series(self,
        request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None,
        *,
        parent: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListTensorboardTimeSeriesPager:
    r"""Lists TensorboardTimeSeries in a Location.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_list_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest(
                parent="parent_value",
            )

            # Make the request
            page_result = client.list_tensorboard_time_series(request=request)

            # Handle the response
            for response in page_result:
                print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
        parent (str):
            Required. The resource name of the
            TensorboardRun to list
            TensorboardTimeSeries. Format:
            'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}'

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager:
            Response message for
            [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].

            Iterating over this object will yield results and
            resolve additional pages automatically.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([parent])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.ListTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest):
        request = tensorboard_service.ListTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # This method is paged; wrap the response in a pager, which provides
    # an `__iter__` convenience method.
    response = pagers.ListTensorboardTimeSeriesPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )

    # Done; return the response.
    return response
def delete_tensorboard_time_series(self,
        request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> gac_operation.Operation:
    r"""Deletes a TensorboardTimeSeries.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_delete_tensorboard_time_series():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest(
                name="name_value",
            )

            # Make the request
            operation = client.delete_tensorboard_time_series(request=request)

            print("Waiting for operation to complete...")

            response = operation.result()

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]):
            The request object. Request message for
            [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries].
        name (str):
            Required. The name of the TensorboardTimeSeries to be
            deleted. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.

            The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
            empty messages in your APIs. A typical example is to
            use it as the request or the response type of an API
            method. For instance:

               service Foo {
                 rpc Bar(google.protobuf.Empty) returns
                 (google.protobuf.Empty);

               }

            The JSON representation for Empty is empty JSON
            object {}.

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest):
        request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("name", request.name),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap the response in an operation future.
    response = gac_operation.from_gapic(
        response,
        self._transport.operations_client,
        empty_pb2.Empty,
        metadata_type=gca_operation.DeleteOperationMetadata,
    )

    # Done; return the response.
    return response
def batch_read_tensorboard_time_series_data(self,
        request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None,
        *,
        tensorboard: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse:
    r"""Reads multiple TensorboardTimeSeries' data. The data
    point number limit is 1000 for scalars, 100 for tensors
    and blob references. If the number of data points stored
    is less than the limit, all data will be returned.
    Otherwise, that limit number of data points will be
    randomly selected from this time series and returned.

    .. code-block:: python

        from google.cloud import aiplatform_v1beta1

        def sample_batch_read_tensorboard_time_series_data():
            # Create a client
            client = aiplatform_v1beta1.TensorboardServiceClient()

            # Initialize request argument(s)
            request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
                tensorboard="tensorboard_value",
                time_series=['time_series_value_1', 'time_series_value_2'],
            )

            # Make the request
            response = client.batch_read_tensorboard_time_series_data(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData].
        tensorboard (str):
            Required. The resource name of the Tensorboard
            containing TensorboardTimeSeries to read data from.
            Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``.
            The TensorboardTimeSeries referenced by
            [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series]
            must be sub resources of this Tensorboard.

            This corresponds to the ``tensorboard`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse:
            Response message for
            [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData].

    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([tensorboard])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Minor optimization to avoid making a copy if the user passes
    # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest):
        request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if tensorboard is not None:
            request.tensorboard = tensorboard

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.batch_read_tensorboard_time_series_data]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("tensorboard", request.tensorboard),
        )),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_read_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard", request.tensorboard), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. 
Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_read_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + response = client.read_tensorboard_time_series_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest): + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_tensorboard_blob_data(self, + request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, + *, + time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: + r"""Gets bytes of TensorboardBlobs. 
+ This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = client.read_tensorboard_blob_data(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]): + The request object. Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + time_series (str): + Required. The resource name of the TensorboardTimeSeries + to list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + + This corresponds to the ``time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: + Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ReadTensorboardBlobDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): + request = tensorboard_service.ReadTensorboardBlobDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if time_series is not None: + request.time_series = time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("time_series", request.time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def write_tensorboard_experiment_data(self, + request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, + *, + tensorboard_experiment: str = None, + write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: + r"""Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_write_tensorboard_experiment_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest() + write_run_data_requests.tensorboard_run = "tensorboard_run_value" + write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest( + tensorboard_experiment="tensorboard_experiment_value", + write_run_data_requests=write_run_data_requests, + ) + + # Make the request + response = client.write_tensorboard_experiment_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment + to write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + + This corresponds to the ``write_run_data_requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: + Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardExperimentDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.WriteTensorboardExperimentDataRequest): + request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if write_run_data_requests is not None: + request.write_run_data_requests = write_run_data_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_experiment_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment", request.tensorboard_experiment), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def write_tensorboard_run_data(self, + request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, + *, + tensorboard_run: str = None, + time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.WriteTensorboardRunDataResponse: + r"""Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_write_tensorboard_run_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + time_series_data = aiplatform_v1beta1.TimeSeriesData() + time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value" + time_series_data.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.WriteTensorboardRunDataRequest( + tensorboard_run="tensorboard_run_value", + time_series_data=time_series_data, + ) + + # Make the request + response = client.write_tensorboard_run_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]): + The request object. Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + tensorboard_run (str): + Required. The resource name of the TensorboardRun to + write data to. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + Required. The TensorboardTimeSeries + data to write. Values within a time + series are indexed by their step value. + Repeated writes to the same step will + overwrite the existing value for that + step. + The upper limit of data points per write + request is 5000. + + This corresponds to the ``time_series_data`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: + Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardRunDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data is not None: + request.time_series_data = time_series_data + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run", request.tensorboard_run), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def export_tensorboard_time_series_data(self, + request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, + *, + tensorboard_time_series: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py new file mode 100644 index 0000000000..e14f0d9d24 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series + + +class ListTensorboardsPager: + """A pager for iterating through ``list_tensorboards`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardsResponse], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard.Tensorboard]: + for page in self.pages: + yield from page.tensorboards + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardsAsyncPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard.Tensorboard]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboards: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_experiment.TensorboardExperiment]: + for page in self.pages: + yield from page.tensorboard_experiments + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsAsyncPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_experiment.TensorboardExperiment]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_experiments: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_run.TensorboardRun]: + for page in self.pages: + yield from page.tensorboard_runs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsAsyncPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_runs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_run.TensorboardRun]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_runs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_time_series`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_time_series.TensorboardTimeSeries]: + for page in self.pages: + yield from page.tensorboard_time_series + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesAsyncPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_time_series`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_time_series.TensorboardTimeSeries]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_time_series: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__iter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tensorboard_data.TimeSeriesDataPoint]: + for page in self.pages: + yield from page.time_series_data_points + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataAsyncPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``time_series_data_points`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[tensorboard_data.TimeSeriesDataPoint]: + async def async_generator(): + async for page in self.pages: + for response in page.time_series_data_points: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py new file mode 100644 index 0000000000..2c199b74f7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TensorboardServiceTransport +from .grpc import TensorboardServiceGrpcTransport +from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] +_transport_registry['grpc'] = TensorboardServiceGrpcTransport +_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport + +__all__ = ( + 'TensorboardServiceTransport', + 'TensorboardServiceGrpcTransport', + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py new file mode 100644 index 0000000000..157b4be289 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -0,0 +1,637 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TensorboardServiceTransport(abc.ABC): + """Abstract transport class for TensorboardService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: 
ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_tensorboard: gapic_v1.method.wrap_method( + self.create_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard: gapic_v1.method.wrap_method( + self.get_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard: gapic_v1.method.wrap_method( + self.update_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboards: gapic_v1.method.wrap_method( + self.list_tensorboards, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard: gapic_v1.method.wrap_method( + self.delete_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_experiment: gapic_v1.method.wrap_method( + self.create_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_experiment: gapic_v1.method.wrap_method( + self.get_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_experiment: 
gapic_v1.method.wrap_method( + self.update_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_experiments: gapic_v1.method.wrap_method( + self.list_tensorboard_experiments, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( + self.delete_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_run: gapic_v1.method.wrap_method( + self.create_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_run: gapic_v1.method.wrap_method( + self.get_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_run: gapic_v1.method.wrap_method( + self.update_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_runs: gapic_v1.method.wrap_method( + self.list_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_run: gapic_v1.method.wrap_method( + self.delete_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.batch_create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_time_series: gapic_v1.method.wrap_method( + self.get_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_time_series: gapic_v1.method.wrap_method( + self.update_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + 
self.list_tensorboard_time_series: gapic_v1.method.wrap_method( + self.list_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( + self.delete_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.batch_read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( + self.read_tensorboard_blob_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method( + self.write_tensorboard_experiment_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_run_data: gapic_v1.method.wrap_method( + self.write_tensorboard_run_data, + default_timeout=None, + client_info=client_info, + ), + self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.export_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Union[ + tensorboard.Tensorboard, + Awaitable[tensorboard.Tensorboard] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Union[ + tensorboard_service.ListTensorboardsResponse, + Awaitable[tensorboard_service.ListTensorboardsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Union[ + tensorboard_experiment.TensorboardExperiment, + Awaitable[tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + 
Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Union[ + tensorboard_service.ListTensorboardExperimentsResponse, + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def batch_create_tensorboard_runs(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Union[ + tensorboard_service.BatchCreateTensorboardRunsResponse, + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Union[ + tensorboard_run.TensorboardRun, + Awaitable[tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Union[ + tensorboard_service.ListTensorboardRunsResponse, + Awaitable[tensorboard_service.ListTensorboardRunsResponse] + ]]: + raise NotImplementedError() + + 
@property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def batch_create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + Union[ + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse, + Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Union[ + tensorboard_time_series.TensorboardTimeSeries, + Awaitable[tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + Union[ + tensorboard_service.ListTensorboardTimeSeriesResponse, + Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def 
batch_read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse] + ]]: + raise NotImplementedError() + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.ReadTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse] + ]]: + raise NotImplementedError() + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + Union[ + tensorboard_service.ReadTensorboardBlobDataResponse, + Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse] + ]]: + raise NotImplementedError() + + @property + def write_tensorboard_experiment_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + Union[ + tensorboard_service.WriteTensorboardExperimentDataResponse, + Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse] + ]]: + raise NotImplementedError() + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + Union[ + tensorboard_service.WriteTensorboardRunDataResponse, + Awaitable[tensorboard_service.WriteTensorboardRunDataResponse] + ]]: + raise NotImplementedError() + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, 
Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'TensorboardServiceTransport', +) 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py new file mode 100644 index 0000000000..c972aae71a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -0,0 +1,1222 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO + + +class TensorboardServiceGrpcTransport(TensorboardServiceTransport): + """gRPC backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. 
+ + Returns: + Callable[[~.CreateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard' not in self._stubs: + self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_tensorboard'] + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + tensorboard.Tensorboard]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + ~.Tensorboard]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard' not in self._stubs: + self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs['get_tensorboard'] + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. 
+ + Returns: + Callable[[~.UpdateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard' not in self._stubs: + self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_tensorboard'] + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + tensorboard_service.ListTensorboardsResponse]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + ~.ListTensorboardsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboards' not in self._stubs: + self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs['list_tensorboards'] + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. 
+ + Returns: + Callable[[~.DeleteTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard' not in self._stubs: + self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard'] + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard_experiment' not in self._stubs: + self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['create_tensorboard_experiment'] + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. + + Returns: + Callable[[~.GetTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_experiment' not in self._stubs: + self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['get_tensorboard_experiment'] + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. 
+ + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_experiment' not in self._stubs: + self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['update_tensorboard_experiment'] + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + tensorboard_service.ListTensorboardExperimentsResponse]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + ~.ListTensorboardExperimentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_tensorboard_experiments' not in self._stubs: + self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs['list_tensorboard_experiments'] + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. + + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard_experiment' not in self._stubs: + self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_experiment'] + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_run' not in self._stubs: + self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['create_tensorboard_run'] + + @property + def batch_create_tensorboard_runs(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + tensorboard_service.BatchCreateTensorboardRunsResponse]: + r"""Return a callable for the batch create tensorboard runs method over gRPC. + + Batch create TensorboardRuns. + + Returns: + Callable[[~.BatchCreateTensorboardRunsRequest], + ~.BatchCreateTensorboardRunsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_runs' not in self._stubs: + self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns', + request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_runs'] + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + tensorboard_run.TensorboardRun]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. 
+ + Returns: + Callable[[~.GetTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_run' not in self._stubs: + self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['get_tensorboard_run'] + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_run' not in self._stubs: + self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['update_tensorboard_run'] + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + tensorboard_service.ListTensorboardRunsResponse]: + r"""Return a callable for the list tensorboard runs method over gRPC. 
+ + Lists TensorboardRuns in a Location. + + Returns: + Callable[[~.ListTensorboardRunsRequest], + ~.ListTensorboardRunsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_runs' not in self._stubs: + self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs['list_tensorboard_runs'] + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_run' not in self._stubs: + self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_run'] + + @property + def batch_create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]: + r"""Return a callable for the batch create tensorboard time + series method over gRPC. + + Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Returns: + Callable[[~.BatchCreateTensorboardTimeSeriesRequest], + ~.BatchCreateTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_time_series' not in self._stubs: + self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries', + request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_time_series'] + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. 
+ + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_time_series' not in self._stubs: + self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['create_tensorboard_time_series'] + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_time_series' not in self._stubs: + self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['update_tensorboard_time_series'] + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + tensorboard_service.ListTensorboardTimeSeriesResponse]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. 
+ + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + ~.ListTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_time_series' not in self._stubs: + self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['list_tensorboard_time_series'] + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. + + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_time_series' not in self._stubs: + self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_time_series'] + + @property + def batch_read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + ~.BatchReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_read_tensorboard_time_series_data' not in self._stubs: + self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['batch_read_tensorboard_time_series_data'] + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + tensorboard_service.ReadTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + ~.ReadTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_time_series_data' not in self._stubs: + self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_time_series_data'] + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + tensorboard_service.ReadTensorboardBlobDataResponse]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + ~.ReadTensorboardBlobDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_blob_data' not in self._stubs: + self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_blob_data'] + + @property + def write_tensorboard_experiment_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + tensorboard_service.WriteTensorboardExperimentDataResponse]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + ~.WriteTensorboardExperimentDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'write_tensorboard_experiment_data' not in self._stubs: + self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData', + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_experiment_data'] + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + tensorboard_service.WriteTensorboardRunDataResponse]: + r"""Return a callable for the write tensorboard run data method over gRPC. + + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + ~.WriteTensorboardRunDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'write_tensorboard_run_data' not in self._stubs: + self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_run_data'] + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. 
+ + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + ~.ExportTensorboardTimeSeriesDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'TensorboardServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..b3b73eb998 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -0,0 +1,1221 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import TensorboardServiceGrpcTransport + + +class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): + """gRPC AsyncIO backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard' not in self._stubs: + self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_tensorboard'] + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Awaitable[tensorboard.Tensorboard]]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + Awaitable[~.Tensorboard]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard' not in self._stubs: + self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs['get_tensorboard'] + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_tensorboard' not in self._stubs: + self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_tensorboard'] + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Awaitable[tensorboard_service.ListTensorboardsResponse]]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + Awaitable[~.ListTensorboardsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboards' not in self._stubs: + self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs['list_tensorboards'] + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard' not in self._stubs: + self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard'] + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_experiment' not in self._stubs: + self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['create_tensorboard_experiment'] + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Awaitable[tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. 
+ + Returns: + Callable[[~.GetTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_experiment' not in self._stubs: + self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['get_tensorboard_experiment'] + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_tensorboard_experiment' not in self._stubs: + self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['update_tensorboard_experiment'] + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + Awaitable[~.ListTensorboardExperimentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_experiments' not in self._stubs: + self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs['list_tensorboard_experiments'] + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. 
+ + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard_experiment' not in self._stubs: + self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_experiment'] + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard_run' not in self._stubs: + self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['create_tensorboard_run'] + + @property + def batch_create_tensorboard_runs(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardRunsRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse]]: + r"""Return a callable for the batch create tensorboard runs method over gRPC. + + Batch create TensorboardRuns. + + Returns: + Callable[[~.BatchCreateTensorboardRunsRequest], + Awaitable[~.BatchCreateTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_runs' not in self._stubs: + self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns', + request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_runs'] + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Awaitable[tensorboard_run.TensorboardRun]]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_run' not in self._stubs: + self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['get_tensorboard_run'] + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_run' not in self._stubs: + self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['update_tensorboard_run'] + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. 
+ + Returns: + Callable[[~.ListTensorboardRunsRequest], + Awaitable[~.ListTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_runs' not in self._stubs: + self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs['list_tensorboard_runs'] + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_run' not in self._stubs: + self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_run'] + + @property + def batch_create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]]: + r"""Return a callable for the batch create tensorboard time + series method over gRPC. + + Batch create TensorboardTimeSeries that belong to a + TensorboardExperiment. + + Returns: + Callable[[~.BatchCreateTensorboardTimeSeriesRequest], + Awaitable[~.BatchCreateTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_tensorboard_time_series' not in self._stubs: + self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries', + request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['batch_create_tensorboard_time_series'] + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. 
+ + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_time_series' not in self._stubs: + self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['create_tensorboard_time_series'] + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_time_series' not in self._stubs: + self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['update_tensorboard_time_series'] + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. 
+ + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + Awaitable[~.ListTensorboardTimeSeriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_time_series' not in self._stubs: + self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', + request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, + ) + return self._stubs['list_tensorboard_time_series'] + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard time series method over gRPC. + + Deletes a TensorboardTimeSeries. + + Returns: + Callable[[~.DeleteTensorboardTimeSeriesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_time_series' not in self._stubs: + self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', + request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_time_series'] + + @property + def batch_read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the batch read tensorboard time + series data method over gRPC. + + Reads multiple TensorboardTimeSeries' data. The data + point number limit is 1000 for scalars, 100 for tensors + and blob references. If the number of data points stored + is less than the limit, all data will be returned. + Otherwise, that limit number of data points will be + randomly selected from this time series and returned. + + Returns: + Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], + Awaitable[~.BatchReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'batch_read_tensorboard_time_series_data' not in self._stubs: + self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['batch_read_tensorboard_time_series_data'] + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the read tensorboard time series + data method over gRPC. + + Reads a TensorboardTimeSeries' data. By default, if the number + of data points stored is less than 1000, all data will be + returned. Otherwise, 1000 data points will be randomly selected + from this time series and returned. This value can be changed by + changing max_data_points, which can't be greater than 10k. + + Returns: + Callable[[~.ReadTensorboardTimeSeriesDataRequest], + Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_time_series_data' not in self._stubs: + self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_time_series_data'] + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]: + r"""Return a callable for the read tensorboard blob data method over gRPC. + + Gets bytes of TensorboardBlobs. + This is to allow reading blob data stored in consumer + project's Cloud Storage bucket without users having to + obtain Cloud Storage access permission. + + Returns: + Callable[[~.ReadTensorboardBlobDataRequest], + Awaitable[~.ReadTensorboardBlobDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'read_tensorboard_blob_data' not in self._stubs: + self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', + request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, + response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, + ) + return self._stubs['read_tensorboard_blob_data'] + + @property + def write_tensorboard_experiment_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardExperimentDataRequest], + Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse]]: + r"""Return a callable for the write tensorboard experiment + data method over gRPC. + + Write time series data points of multiple + TensorboardTimeSeries in multiple TensorboardRun's. If + any data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardExperimentDataRequest], + Awaitable[~.WriteTensorboardExperimentDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'write_tensorboard_experiment_data' not in self._stubs: + self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData', + request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_experiment_data'] + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]: + r"""Return a callable for the write tensorboard run data method over gRPC. + + Write time series data points into multiple + TensorboardTimeSeries under a TensorboardRun. If any + data fail to be ingested, an error will be returned. + + Returns: + Callable[[~.WriteTensorboardRunDataRequest], + Awaitable[~.WriteTensorboardRunDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'write_tensorboard_run_data' not in self._stubs: + self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', + request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, + response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, + ) + return self._stubs['write_tensorboard_run_data'] + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]: + r"""Return a callable for the export tensorboard time series + data method over gRPC. + + Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py new file mode 100644 index 0000000000..5a652ada3e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import VizierServiceClient +from .async_client import VizierServiceAsyncClient + +__all__ = ( + 'VizierServiceClient', + 'VizierServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py new file mode 100644 index 0000000000..db4b13f980 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -0,0 +1,2264 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport +from .client import VizierServiceClient + + +class VizierServiceAsyncClient: + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. 
+ """ + + _client: VizierServiceClient + + DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT + + custom_job_path = staticmethod(VizierServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) + study_path = staticmethod(VizierServiceClient.study_path) + parse_study_path = staticmethod(VizierServiceClient.parse_study_path) + trial_path = staticmethod(VizierServiceClient.trial_path) + parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(VizierServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(VizierServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) + common_project_path = staticmethod(VizierServiceClient.common_project_path) + parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) + common_location_path = staticmethod(VizierServiceClient.common_location_path) + parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceAsyncClient: The constructed client. 
+ """ + return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceAsyncClient: The constructed client. + """ + return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return VizierServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> VizierServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VizierServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, VizierServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vizier service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.VizierServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = VizierServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_study(self, + request: Union[vizier_service.CreateStudyRequest, dict] = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]): + The request object. Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. + parent (:class:`str`): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + study (:class:`google.cloud.aiplatform_v1beta1.types.Study`): + Required. The Study configuration + used to create the Study. + + This corresponds to the ``study`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, study]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.CreateStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if study is not None: + request.study = study + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_study, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_study(self, + request: Union[vizier_service.GetStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Gets a Study by name. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]): + The request object. Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. + name (:class:`str`): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.GetStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_study, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_studies(self, + request: Union[vizier_service.ListStudiesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListStudiesAsyncPager: + r"""Lists all the studies in a region for an associated + project. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_studies(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]): + The request object. Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager: + Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListStudiesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_studies, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListStudiesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_study(self, + request: Union[vizier_service.DeleteStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + await client.delete_study(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. + name (:class:`str`): + Required. The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.DeleteStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_study, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def lookup_study(self, + request: Union[vizier_service.LookupStudyRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_lookup_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + parent (:class:`str`): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.LookupStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.lookup_study, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def suggest_trials(self, + request: Union[vizier_service.SuggestTrialsRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. 
Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_suggest_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]): + The request object. Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` + Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.SuggestTrialsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.suggest_trials, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vizier_service.SuggestTrialsResponse, + metadata_type=vizier_service.SuggestTrialsMetadata, + ) + + # Done; return the response. + return response + + async def create_trial(self, + request: Union[vizier_service.CreateTrialRequest, dict] = None, + *, + parent: str = None, + trial: study.Trial = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a user provided Trial to a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_create_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]): + The request object. Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. + parent (:class:`str`): + Required. The resource name of the Study to create the + Trial in. 
Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + trial (:class:`google.cloud.aiplatform_v1beta1.types.Trial`): + Required. The Trial to create. + This corresponds to the ``trial`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, trial]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.CreateTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if trial is not None: + request.trial = trial + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_trial(self, + request: Union[vizier_service.GetTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Gets a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_get_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]): + The request object. Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. + name (:class:`str`): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.GetTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_trials(self, + request: Union[vizier_service.ListTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrialsAsyncPager: + r"""Lists the Trials associated with a Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + parent (:class:`str`): + Required. The resource name of the Study to list the + Trial from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager: + Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListTrialsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_trials, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrialsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def add_trial_measurement(self, + request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = await client.add_trial_measurement(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]): + The request object. Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.AddTrialMeasurementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_trial_measurement, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def complete_trial(self, + request: Union[vizier_service.CompleteTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Marks a Trial as complete. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_complete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]): + The request object. Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.CompleteTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.complete_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_trial(self, + request: Union[vizier_service.DeleteTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + await client.delete_trial(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]): + The request object. Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. + name (:class:`str`): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.DeleteTrialRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def check_trial_early_stopping_state(self, + request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_trial_early_stopping_state, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. + return response + + async def stop_trial(self, + request: Union[vizier_service.StopTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_stop_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. 
+ + """ + # Create or coerce a protobuf request object. + request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stop_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_optimal_trials(self, + request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + async def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + parent (:class:`str`): + Required. 
The name of the Study that + the optimal Trial belongs to. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListOptimalTrialsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_optimal_trials, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. 
It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VizierServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py new file mode 100644 index 0000000000..67edcc77c4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -0,0 +1,2481 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import VizierServiceGrpcTransport +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport + + +class VizierServiceClientMeta(type): + """Metaclass for the VizierService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[VizierServiceTransport]]
+    _transport_registry["grpc"] = VizierServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[VizierServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class VizierServiceClient(metaclass=VizierServiceClientMeta):
+    """Vertex AI Vizier API.
+    Vertex AI Vizier is a service to solve blackbox optimization
+    problems, such as tuning machine learning hyperparameters and
+    searching over deep learning architectures.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VizierServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VizierServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def custom_job_path(project: str,location: str,custom_job: str,) -> str:
+        """Returns a fully-qualified custom_job string."""
+        return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+
+    @staticmethod
+    def parse_custom_job_path(path: str) -> Dict[str,str]:
+        """Parses a custom_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def study_path(project: str,location: str,study: str,) -> str:
+        """Returns a fully-qualified study string."""
+        return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, )
+
+    @staticmethod
+    def parse_study_path(path: str) -> Dict[str,str]:
+        """Parses a study path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def trial_path(project: str,location: str,study: str,trial: str,) -> str:
+        """Returns a fully-qualified trial string."""
+        return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
+
+    @staticmethod
+    def parse_trial_path(path: str) -> Dict[str,str]:
+        """Parses a trial path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component
segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_client_cert not in ("true", "false"):
+            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`")
+
+        # Figure out the client cert source to use.
+ client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VizierServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vizier service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VizierServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, VizierServiceTransport): + # transport is a VizierServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def create_study(self, + request: Union[vizier_service.CreateStudyRequest, dict] = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]): + The request object. 
Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. + parent (str): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + study (google.cloud.aiplatform_v1beta1.types.Study): + Required. The Study configuration + used to create the Study. + + This corresponds to the ``study`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, study]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateStudyRequest): + request = vizier_service.CreateStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if study is not None: + request.study = study + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_study(self, + request: Union[vizier_service.GetStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Gets a Study by name. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]): + The request object. Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. + name (str): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetStudyRequest): + request = vizier_service.GetStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_studies(self, + request: Union[vizier_service.ListStudiesRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListStudiesPager: + r"""Lists all the studies in a region for an associated + project. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_studies(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]): + The request object. Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + parent (str): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager: + Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListStudiesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListStudiesRequest): + request = vizier_service.ListStudiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_studies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListStudiesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_study(self, + request: Union[vizier_service.DeleteStudyRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + client.delete_study(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. 
+ name (str): + Required. The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteStudyRequest): + request = vizier_service.DeleteStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def lookup_study(self, + request: Union[vizier_service.LookupStudyRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_lookup_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + parent (str): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.LookupStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.LookupStudyRequest): + request = vizier_service.LookupStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.lookup_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def suggest_trials(self, + request: Union[vizier_service.SuggestTrialsRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_suggest_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]): + The request object. Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` + Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.SuggestTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.SuggestTrialsRequest): + request = vizier_service.SuggestTrialsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.suggest_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.SuggestTrialsResponse, + metadata_type=vizier_service.SuggestTrialsMetadata, + ) + + # Done; return the response. + return response + + def create_trial(self, + request: Union[vizier_service.CreateTrialRequest, dict] = None, + *, + parent: str = None, + trial: study.Trial = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a user provided Trial to a Study. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_create_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]): + The request object. Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. + parent (str): + Required. The resource name of the Study to create the + Trial in. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + trial (google.cloud.aiplatform_v1beta1.types.Trial): + Required. 
The Trial to create. + This corresponds to the ``trial`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, trial]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateTrialRequest): + request = vizier_service.CreateTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if trial is not None: + request.trial = trial + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_trial(self, + request: Union[vizier_service.GetTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Gets a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_get_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]): + The request object. Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. + name (str): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetTrialRequest): + request = vizier_service.GetTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_trials(self, + request: Union[vizier_service.ListTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrialsPager: + r"""Lists the Trials associated with a Study. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + parent (str): + Required. The resource name of the Study to list the + Trial from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager: + Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListTrialsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListTrialsRequest): + request = vizier_service.ListTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTrialsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_trial_measurement(self, + request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]): + The request object. Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.AddTrialMeasurementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.AddTrialMeasurementRequest): + request = vizier_service.AddTrialMeasurementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def complete_trial(self, + request: Union[vizier_service.CompleteTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Marks a Trial as complete. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_complete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]): + The request object. Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CompleteTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, vizier_service.CompleteTrialRequest): + request = vizier_service.CompleteTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.complete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_trial(self, + request: Union[vizier_service.DeleteTrialRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_delete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + client.delete_trial(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]): + The request object. Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteTrialRequest): + request = vizier_service.DeleteTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def check_trial_early_stopping_state(self, + request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CheckTrialEarlyStoppingStateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest): + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. + return response + + def stop_trial(self, + request: Union[vizier_service.StopTrialRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_stop_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.StopTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.StopTrialRequest): + request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stop_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_optimal_trials(self, + request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + parent (str): + Required. The name of the Study that + the optimal Trial belongs to. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListOptimalTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, vizier_service.ListOptimalTrialsRequest): + request = vizier_service.ListOptimalTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: operations_pb2.ListOperationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: operations_pb2.GetOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: operations_pb2.DeleteOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: operations_pb2.CancelOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def wait_operation( + self, + request: operations_pb2.WaitOperationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ **JSON Example** + :: + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + **YAML Example** + :: + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: locations_pb2.GetLocationRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: locations_pb2.ListLocationsRequest = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. 
Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VizierServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py new file mode 100644 index 0000000000..c2044e5044 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import vizier_service + + +class ListStudiesPager: + """A pager for iterating through ``list_studies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``studies`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListStudies`` requests and continue to iterate + through the ``studies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., vizier_service.ListStudiesResponse], + request: vizier_service.ListStudiesRequest, + response: vizier_service.ListStudiesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListStudiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[vizier_service.ListStudiesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[study.Study]: + for page in self.pages: + yield from page.studies + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListStudiesAsyncPager: + """A pager for iterating through ``list_studies`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``studies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListStudies`` requests and continue to iterate + through the ``studies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]], + request: vizier_service.ListStudiesRequest, + response: vizier_service.ListStudiesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = vizier_service.ListStudiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[study.Study]: + async def async_generator(): + async for page in self.pages: + for response in page.studies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrialsPager: + """A pager for iterating through ``list_trials`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``trials`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTrials`` requests and continue to iterate + through the ``trials`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., vizier_service.ListTrialsResponse], + request: vizier_service.ListTrialsRequest, + response: vizier_service.ListTrialsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListTrialsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[vizier_service.ListTrialsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[study.Trial]: + for page in self.pages: + yield from page.trials + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrialsAsyncPager: + """A pager for iterating through ``list_trials`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``trials`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTrials`` requests and continue to iterate + through the ``trials`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]], + request: vizier_service.ListTrialsRequest, + response: vizier_service.ListTrialsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListTrialsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[study.Trial]: + async def async_generator(): + async for page in self.pages: + for response in page.trials: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py new file mode 100644 index 0000000000..b2efe6a224 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import VizierServiceTransport
from .grpc import VizierServiceGrpcTransport
from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport


# Registry mapping each transport name to the class that implements it;
# insertion order is preserved deliberately ('grpc' is the default lookup).
_transport_registry: Dict[str, Type[VizierServiceTransport]] = OrderedDict(
    (
        ('grpc', VizierServiceGrpcTransport),
        ('grpc_asyncio', VizierServiceGrpcAsyncIOTransport),
    )
)

__all__ = (
    'VizierServiceTransport',
    'VizierServiceGrpcTransport',
    'VizierServiceGrpcAsyncIOTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1beta1.types import study
from google.cloud.aiplatform_v1beta1.types import study as gca_study
from google.cloud.aiplatform_v1beta1.types import vizier_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-aiplatform',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. source checkout): send a
    # client-info header without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class VizierServiceTransport(abc.ABC):
    """Abstract transport class for VizierService.

    Concrete transports (gRPC, gRPC-asyncio) subclass this; each RPC is
    exposed as a property returning the transport-specific callable.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'aiplatform.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always
        # try to use self-signed JWT (skips a token round-trip).
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials,
                            "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute retry/timeout-wrapped versions of every RPC method.

        Called by concrete transports once their stubs exist. Every Vizier
        RPC uses the same 5-second default timeout, so the wrapping is
        driven by a single list instead of 15 hand-written entries.
        """
        rpc_names = (
            'create_study',
            'get_study',
            'list_studies',
            'delete_study',
            'lookup_study',
            'suggest_trials',
            'create_trial',
            'get_trial',
            'list_trials',
            'add_trial_measurement',
            'complete_trial',
            'delete_trial',
            'check_trial_early_stopping_state',
            'stop_trial',
            'list_optimal_trials',
        )
        self._wrapped_methods = {}
        for name in rpc_names:
            rpc = getattr(self, name)
            self._wrapped_methods[rpc] = gapic_v1.method.wrap_method(
                rpc,
                default_timeout=5.0,
                client_info=client_info,
            )

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # Each RPC property below must be implemented by a concrete transport.
    # The Union return type covers both sync (grpc) and async (grpc_asyncio)
    # implementations.

    @property
    def create_study(self) -> Callable[
            [vizier_service.CreateStudyRequest],
            Union[
                gca_study.Study,
                Awaitable[gca_study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def get_study(self) -> Callable[
            [vizier_service.GetStudyRequest],
            Union[
                study.Study,
                Awaitable[study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def list_studies(self) -> Callable[
            [vizier_service.ListStudiesRequest],
            Union[
                vizier_service.ListStudiesResponse,
                Awaitable[vizier_service.ListStudiesResponse]
            ]]:
        raise NotImplementedError()

    @property
    def delete_study(self) -> Callable[
            [vizier_service.DeleteStudyRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def lookup_study(self) -> Callable[
            [vizier_service.LookupStudyRequest],
            Union[
                study.Study,
                Awaitable[study.Study]
            ]]:
        raise NotImplementedError()

    @property
    def suggest_trials(self) -> Callable[
            [vizier_service.SuggestTrialsRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def create_trial(self) -> Callable[
            [vizier_service.CreateTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def get_trial(self) -> Callable[
            [vizier_service.GetTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def list_trials(self) -> Callable[
            [vizier_service.ListTrialsRequest],
            Union[
                vizier_service.ListTrialsResponse,
                Awaitable[vizier_service.ListTrialsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def add_trial_measurement(self) -> Callable[
            [vizier_service.AddTrialMeasurementRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def complete_trial(self) -> Callable[
            [vizier_service.CompleteTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def delete_trial(self) -> Callable[
            [vizier_service.DeleteTrialRequest],
            Union[
                empty_pb2.Empty,
                Awaitable[empty_pb2.Empty]
            ]]:
        raise NotImplementedError()

    @property
    def check_trial_early_stopping_state(self) -> Callable[
            [vizier_service.CheckTrialEarlyStoppingStateRequest],
            Union[
                operations_pb2.Operation,
                Awaitable[operations_pb2.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def stop_trial(self) -> Callable[
            [vizier_service.StopTrialRequest],
            Union[
                study.Trial,
                Awaitable[study.Trial]
            ]]:
        raise NotImplementedError()

    @property
    def list_optimal_trials(self) -> Callable[
            [vizier_service.ListOptimalTrialsRequest],
            Union[
                vizier_service.ListOptimalTrialsResponse,
                Awaitable[vizier_service.ListOptimalTrialsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[operations_pb2.ListOperationsResponse,
              Awaitable[operations_pb2.ListOperationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        None,
    ]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[locations_pb2.ListLocationsResponse,
              Awaitable[locations_pb2.ListLocationsResponse]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = (
    'VizierServiceTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union

from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.cloud.aiplatform_v1beta1.types import study
from google.cloud.aiplatform_v1beta1.types import study as gca_study
from google.cloud.aiplatform_v1beta1.types import vizier_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore
from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO


class VizierServiceGrpcTransport(VizierServiceTransport):
    """gRPC backend transport for VizierService.

    Vertex AI Vizier API.
    Vertex AI Vizier is a service to solve blackbox optimization
    problems, such as tuning machine learning hyperparameters and
    searching over deep learning architectures.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of lazily-created per-RPC stub callables, keyed by method name.
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'aiplatform.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: Optional[grpc.Channel] = None,
            api_mtls_endpoint: Optional[str] = None,
            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
                       host: str = 'aiplatform.googleapis.com',
                       credentials: Optional[ga_credentials.Credentials] = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    @property
    def create_study(self) -> Callable[
            [vizier_service.CreateStudyRequest],
            gca_study.Study]:
        r"""Return a callable for the create study method over gRPC.

        Creates a Study. A resource name will be generated
        after creation of the Study.

        Returns:
            Callable[[~.CreateStudyRequest],
                    ~.Study]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_study' not in self._stubs:
            self._stubs['create_study'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy',
                request_serializer=vizier_service.CreateStudyRequest.serialize,
                response_deserializer=gca_study.Study.deserialize,
            )
        return self._stubs['create_study']

    @property
    def get_study(self) -> Callable[
            [vizier_service.GetStudyRequest],
            study.Study]:
        r"""Return a callable for the get study method over gRPC.

        Gets a Study by name.

        Returns:
            Callable[[~.GetStudyRequest],
                    ~.Study]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_study' not in self._stubs:
            self._stubs['get_study'] = self.grpc_channel.unary_unary(
                '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy',
                request_serializer=vizier_service.GetStudyRequest.serialize,
                response_deserializer=study.Study.deserialize,
            )
        return self._stubs['get_study']
+ + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + ~.ListStudiesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + study.Study]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. 
+ + Returns: + Callable[[~.LookupStudyRequest], + ~.Study]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'lookup_study' not in self._stubs: + self._stubs['lookup_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', + request_serializer=vizier_service.LookupStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['lookup_study'] + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + operations_pb2.Operation]: + r"""Return a callable for the suggest trials method over gRPC. + + Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + Returns: + Callable[[~.SuggestTrialsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'suggest_trials' not in self._stubs: + self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', + request_serializer=vizier_service.SuggestTrialsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['suggest_trials'] + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + study.Trial]: + r"""Return a callable for the create trial method over gRPC. + + Adds a user provided Trial to a Study. + + Returns: + Callable[[~.CreateTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + study.Trial]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + vizier_service.ListTrialsResponse]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + ~.ListTrialsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + study.Trial]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + study.Trial]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_trial' not in self._stubs: + self._stubs['complete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', + request_serializer=vizier_service.CompleteTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['complete_trial'] + + @property + def delete_trial(self) -> Callable[ + [vizier_service.DeleteTrialRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete trial method over gRPC. + + Deletes a Trial. + + Returns: + Callable[[~.DeleteTrialRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_trial' not in self._stubs: + self._stubs['delete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', + request_serializer=vizier_service.DeleteTrialRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_trial'] + + @property + def check_trial_early_stopping_state(self) -> Callable[ + [vizier_service.CheckTrialEarlyStoppingStateRequest], + operations_pb2.Operation]: + r"""Return a callable for the check trial early stopping + state method over gRPC. + + Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + Returns: + Callable[[~.CheckTrialEarlyStoppingStateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_trial_early_stopping_state' not in self._stubs: + self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', + request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['check_trial_early_stopping_state'] + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + study.Trial]: + r"""Return a callable for the stop trial method over gRPC. + + Stops a Trial. + + Returns: + Callable[[~.StopTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'stop_trial' not in self._stubs: + self._stubs['stop_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', + request_serializer=vizier_service.StopTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['stop_trial'] + + @property + def list_optimal_trials(self) -> Callable[ + [vizier_service.ListOptimalTrialsRequest], + vizier_service.ListOptimalTrialsResponse]: + r"""Return a callable for the list optimal trials method over gRPC. + + Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + Returns: + Callable[[~.ListOptimalTrialsRequest], + ~.ListOptimalTrialsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_optimal_trials' not in self._stubs: + self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', + request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, + response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, + ) + return self._stubs['list_optimal_trials'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'VizierServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..0de96ff050 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -0,0 +1,875 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import VizierServiceGrpcTransport + + +class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): + """gRPC AsyncIO backend transport for VizierService. + + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_study(self) -> Callable[ + [vizier_service.CreateStudyRequest], + Awaitable[gca_study.Study]]: + r"""Return a callable for the create study method over gRPC. + + Creates a Study. A resource name will be generated + after creation of the Study. + + Returns: + Callable[[~.CreateStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_study' not in self._stubs: + self._stubs['create_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', + request_serializer=vizier_service.CreateStudyRequest.serialize, + response_deserializer=gca_study.Study.deserialize, + ) + return self._stubs['create_study'] + + @property + def get_study(self) -> Callable[ + [vizier_service.GetStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the get study method over gRPC. + + Gets a Study by name. + + Returns: + Callable[[~.GetStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_study' not in self._stubs: + self._stubs['get_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', + request_serializer=vizier_service.GetStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['get_study'] + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + Awaitable[vizier_service.ListStudiesResponse]]: + r"""Return a callable for the list studies method over gRPC. + + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + Awaitable[~.ListStudiesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Returns: + Callable[[~.LookupStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'lookup_study' not in self._stubs: + self._stubs['lookup_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', + request_serializer=vizier_service.LookupStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['lookup_study'] + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the suggest trials method over gRPC. + + Adds one or more Trials to a Study, with parameter values + suggested by Vertex AI Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + Returns: + Callable[[~.SuggestTrialsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'suggest_trials' not in self._stubs: + self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', + request_serializer=vizier_service.SuggestTrialsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['suggest_trials'] + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the create trial method over gRPC. + + Adds a user provided Trial to a Study. + + Returns: + Callable[[~.CreateTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + Awaitable[vizier_service.ListTrialsResponse]]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + Awaitable[~.ListTrialsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'complete_trial' not in self._stubs:
+            self._stubs['complete_trial'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial',
+                request_serializer=vizier_service.CompleteTrialRequest.serialize,
+                response_deserializer=study.Trial.deserialize,
+            )
+        return self._stubs['complete_trial']
+
+    @property
+    def delete_trial(self) -> Callable[
+            [vizier_service.DeleteTrialRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the delete trial method over gRPC.
+
+        Deletes a Trial.
+
+        Returns:
+            Callable[[~.DeleteTrialRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_trial' not in self._stubs:
+            self._stubs['delete_trial'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial',
+                request_serializer=vizier_service.DeleteTrialRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['delete_trial']
+
+    @property
+    def check_trial_early_stopping_state(self) -> Callable[
+            [vizier_service.CheckTrialEarlyStoppingStateRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the check trial early stopping
+        state method over gRPC.
+
+        Checks whether a Trial should stop or not. Returns a
+        long-running operation. When the operation is successful, it
+        will contain a
+        [CheckTrialEarlyStoppingStateResponse][google.cloud.aiplatform.v1beta1.CheckTrialEarlyStoppingStateResponse].
+
+        Returns:
+            Callable[[~.CheckTrialEarlyStoppingStateRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_trial_early_stopping_state' not in self._stubs: + self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', + request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['check_trial_early_stopping_state'] + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the stop trial method over gRPC. + + Stops a Trial. + + Returns: + Callable[[~.StopTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'stop_trial' not in self._stubs: + self._stubs['stop_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', + request_serializer=vizier_service.StopTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['stop_trial'] + + @property + def list_optimal_trials(self) -> Callable[ + [vizier_service.ListOptimalTrialsRequest], + Awaitable[vizier_service.ListOptimalTrialsResponse]]: + r"""Return a callable for the list optimal trials method over gRPC. + + Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. 
+ https://en.wikipedia.org/wiki/Pareto_efficiency + + Returns: + Callable[[~.ListOptimalTrialsRequest], + Awaitable[~.ListOptimalTrialsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_optimal_trials' not in self._stubs: + self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', + request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, + response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, + ) + return self._stubs['list_optimal_trials'] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], iam_policy_pb2.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ( + 'VizierServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py new file mode 100644 index 0000000000..e7cfaba30a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -0,0 +1,1036 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) +from .custom_job import ( + ContainerSpec, + CustomJob, + CustomJobSpec, + PythonPackageSpec, + Scheduling, + WorkerPoolSpec, +) +from .data_item import ( + DataItem, +) +from .data_labeling_job import ( + ActiveLearningConfig, + DataLabelingJob, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ExportDataConfig, + ImportDataConfig, +) +from .dataset_service import ( + CreateDatasetOperationMetadata, + CreateDatasetRequest, + DeleteDatasetRequest, + ExportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + GetAnnotationSpecRequest, + GetDatasetRequest, + ImportDataOperationMetadata, + ImportDataRequest, + ImportDataResponse, + ListAnnotationsRequest, + ListAnnotationsResponse, + ListDataItemsRequest, + ListDataItemsResponse, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, +) +from .deployed_index_ref import ( + DeployedIndexRef, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) +from .endpoint import ( + DeployedModel, + Endpoint, + PredictRequestResponseLoggingConfig, + PrivateEndpoints, +) +from .endpoint_service import ( + CreateEndpointOperationMetadata, + CreateEndpointRequest, + DeleteEndpointRequest, + DeployModelOperationMetadata, + DeployModelRequest, + DeployModelResponse, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UndeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UpdateEndpointRequest, +) +from .entity_type import ( + EntityType, +) +from .env_var import ( + EnvVar, +) +from .event import ( + Event, +) +from .execution import ( + Execution, +) +from .explanation import ( + 
Attribution, + BlurBaselineConfig, + Examples, + ExamplesOverride, + ExamplesRestrictionsNamespace, + Explanation, + ExplanationMetadataOverride, + ExplanationParameters, + ExplanationSpec, + ExplanationSpecOverride, + FeatureNoiseSigma, + IntegratedGradientsAttribution, + ModelExplanation, + Neighbor, + Presets, + SampledShapleyAttribution, + SmoothGradConfig, + XraiAttribution, +) +from .explanation_metadata import ( + ExplanationMetadata, +) +from .feature import ( + Feature, +) +from .feature_monitoring_stats import ( + FeatureStatsAnomaly, +) +from .feature_selector import ( + FeatureSelector, + IdMatcher, +) +from .featurestore import ( + Featurestore, +) +from .featurestore_monitoring import ( + FeaturestoreMonitoringConfig, +) +from .featurestore_online_service import ( + FeatureValue, + FeatureValueList, + ReadFeatureValuesRequest, + ReadFeatureValuesResponse, + StreamingReadFeatureValuesRequest, +) +from .featurestore_service import ( + BatchCreateFeaturesOperationMetadata, + BatchCreateFeaturesRequest, + BatchCreateFeaturesResponse, + BatchReadFeatureValuesOperationMetadata, + BatchReadFeatureValuesRequest, + BatchReadFeatureValuesResponse, + CreateEntityTypeOperationMetadata, + CreateEntityTypeRequest, + CreateFeatureOperationMetadata, + CreateFeatureRequest, + CreateFeaturestoreOperationMetadata, + CreateFeaturestoreRequest, + DeleteEntityTypeRequest, + DeleteFeatureRequest, + DeleteFeaturestoreRequest, + DestinationFeatureSetting, + ExportFeatureValuesOperationMetadata, + ExportFeatureValuesRequest, + ExportFeatureValuesResponse, + FeatureValueDestination, + GetEntityTypeRequest, + GetFeatureRequest, + GetFeaturestoreRequest, + ImportFeatureValuesOperationMetadata, + ImportFeatureValuesRequest, + ImportFeatureValuesResponse, + ListEntityTypesRequest, + ListEntityTypesResponse, + ListFeaturesRequest, + ListFeaturesResponse, + ListFeaturestoresRequest, + ListFeaturestoresResponse, + SearchFeaturesRequest, + SearchFeaturesResponse, + 
UpdateEntityTypeRequest, + UpdateFeatureRequest, + UpdateFeaturestoreOperationMetadata, + UpdateFeaturestoreRequest, +) +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .index import ( + Index, +) +from .index_endpoint import ( + DeployedIndex, + DeployedIndexAuthConfig, + IndexEndpoint, + IndexPrivateEndpoints, +) +from .index_endpoint_service import ( + CreateIndexEndpointOperationMetadata, + CreateIndexEndpointRequest, + DeleteIndexEndpointRequest, + DeployIndexOperationMetadata, + DeployIndexRequest, + DeployIndexResponse, + GetIndexEndpointRequest, + ListIndexEndpointsRequest, + ListIndexEndpointsResponse, + MutateDeployedIndexOperationMetadata, + MutateDeployedIndexRequest, + MutateDeployedIndexResponse, + UndeployIndexOperationMetadata, + UndeployIndexRequest, + UndeployIndexResponse, + UpdateIndexEndpointRequest, +) +from .index_service import ( + CreateIndexOperationMetadata, + CreateIndexRequest, + DeleteIndexRequest, + GetIndexRequest, + ListIndexesRequest, + ListIndexesResponse, + NearestNeighborSearchOperationMetadata, + UpdateIndexOperationMetadata, + UpdateIndexRequest, +) +from .io import ( + AvroSource, + BigQueryDestination, + BigQuerySource, + ContainerRegistryDestination, + CsvDestination, + CsvSource, + GcsDestination, + GcsSource, + TFRecordDestination, +) +from .job_service import ( + CancelBatchPredictionJobRequest, + CancelCustomJobRequest, + CancelDataLabelingJobRequest, + CancelHyperparameterTuningJobRequest, + CreateBatchPredictionJobRequest, + CreateCustomJobRequest, + CreateDataLabelingJobRequest, + CreateHyperparameterTuningJobRequest, + CreateModelDeploymentMonitoringJobRequest, + DeleteBatchPredictionJobRequest, + DeleteCustomJobRequest, + DeleteDataLabelingJobRequest, + DeleteHyperparameterTuningJobRequest, + DeleteModelDeploymentMonitoringJobRequest, + GetBatchPredictionJobRequest, + GetCustomJobRequest, + GetDataLabelingJobRequest, + GetHyperparameterTuningJobRequest, + 
GetModelDeploymentMonitoringJobRequest, + ListBatchPredictionJobsRequest, + ListBatchPredictionJobsResponse, + ListCustomJobsRequest, + ListCustomJobsResponse, + ListDataLabelingJobsRequest, + ListDataLabelingJobsResponse, + ListHyperparameterTuningJobsRequest, + ListHyperparameterTuningJobsResponse, + ListModelDeploymentMonitoringJobsRequest, + ListModelDeploymentMonitoringJobsResponse, + PauseModelDeploymentMonitoringJobRequest, + ResumeModelDeploymentMonitoringJobRequest, + SearchModelDeploymentMonitoringStatsAnomaliesRequest, + SearchModelDeploymentMonitoringStatsAnomaliesResponse, + UpdateModelDeploymentMonitoringJobOperationMetadata, + UpdateModelDeploymentMonitoringJobRequest, +) +from .lineage_subgraph import ( + LineageSubgraph, +) +from .machine_resources import ( + AutomaticResources, + AutoscalingMetricSpec, + BatchDedicatedResources, + DedicatedResources, + DiskSpec, + MachineSpec, + NfsMount, + ResourcesConsumed, +) +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .metadata_schema import ( + MetadataSchema, +) +from .metadata_service import ( + AddContextArtifactsAndExecutionsRequest, + AddContextArtifactsAndExecutionsResponse, + AddContextChildrenRequest, + AddContextChildrenResponse, + AddExecutionEventsRequest, + AddExecutionEventsResponse, + CreateArtifactRequest, + CreateContextRequest, + CreateExecutionRequest, + CreateMetadataSchemaRequest, + CreateMetadataStoreOperationMetadata, + CreateMetadataStoreRequest, + DeleteArtifactRequest, + DeleteContextRequest, + DeleteExecutionRequest, + DeleteMetadataStoreOperationMetadata, + DeleteMetadataStoreRequest, + GetArtifactRequest, + GetContextRequest, + GetExecutionRequest, + GetMetadataSchemaRequest, + GetMetadataStoreRequest, + ListArtifactsRequest, + ListArtifactsResponse, + ListContextsRequest, + ListContextsResponse, + ListExecutionsRequest, + ListExecutionsResponse, + ListMetadataSchemasRequest, + ListMetadataSchemasResponse, + ListMetadataStoresRequest, + 
ListMetadataStoresResponse, + PurgeArtifactsMetadata, + PurgeArtifactsRequest, + PurgeArtifactsResponse, + PurgeContextsMetadata, + PurgeContextsRequest, + PurgeContextsResponse, + PurgeExecutionsMetadata, + PurgeExecutionsRequest, + PurgeExecutionsResponse, + QueryArtifactLineageSubgraphRequest, + QueryContextLineageSubgraphRequest, + QueryExecutionInputsAndOutputsRequest, + UpdateArtifactRequest, + UpdateContextRequest, + UpdateExecutionRequest, +) +from .metadata_store import ( + MetadataStore, +) +from .migratable_resource import ( + MigratableResource, +) +from .migration_service import ( + BatchMigrateResourcesOperationMetadata, + BatchMigrateResourcesRequest, + BatchMigrateResourcesResponse, + MigrateResourceRequest, + MigrateResourceResponse, + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, +) +from .model import ( + Model, + ModelContainerSpec, + Port, + PredictSchemata, +) +from .model_deployment_monitoring_job import ( + ModelDeploymentMonitoringBigQueryTable, + ModelDeploymentMonitoringJob, + ModelDeploymentMonitoringObjectiveConfig, + ModelDeploymentMonitoringScheduleConfig, + ModelMonitoringStatsAnomalies, + ModelDeploymentMonitoringObjectiveType, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + ModelEvaluationSlice, +) +from .model_monitoring import ( + ModelMonitoringAlertConfig, + ModelMonitoringObjectiveConfig, + SamplingStrategy, + ThresholdConfig, +) +from .model_service import ( + DeleteModelRequest, + DeleteModelVersionRequest, + ExportModelOperationMetadata, + ExportModelRequest, + ExportModelResponse, + GetModelEvaluationRequest, + GetModelEvaluationSliceRequest, + GetModelRequest, + ImportModelEvaluationRequest, + ListModelEvaluationSlicesRequest, + ListModelEvaluationSlicesResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + ListModelVersionsRequest, + ListModelVersionsResponse, + 
MergeVersionAliasesRequest, + UpdateExplanationDatasetOperationMetadata, + UpdateExplanationDatasetRequest, + UpdateExplanationDatasetResponse, + UpdateModelRequest, + UploadModelOperationMetadata, + UploadModelRequest, + UploadModelResponse, +) +from .operation import ( + DeleteOperationMetadata, + GenericOperationMetadata, +) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, + PipelineTemplateMetadata, +) +from .pipeline_service import ( + CancelPipelineJobRequest, + CancelTrainingPipelineRequest, + CreatePipelineJobRequest, + CreateTrainingPipelineRequest, + DeletePipelineJobRequest, + DeleteTrainingPipelineRequest, + GetPipelineJobRequest, + GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, +) +from .prediction_service import ( + ExplainRequest, + ExplainResponse, + PredictRequest, + PredictResponse, + RawPredictRequest, +) +from .specialist_pool import ( + SpecialistPool, +) +from .specialist_pool_service import ( + CreateSpecialistPoolOperationMetadata, + CreateSpecialistPoolRequest, + DeleteSpecialistPoolRequest, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + UpdateSpecialistPoolOperationMetadata, + UpdateSpecialistPoolRequest, +) +from .study import ( + Measurement, + Study, + StudySpec, + Trial, +) +from .tensorboard import ( + Tensorboard, +) +from .tensorboard_data import ( + Scalar, + TensorboardBlob, + TensorboardBlobSequence, + TensorboardTensor, + TimeSeriesData, + TimeSeriesDataPoint, +) +from .tensorboard_experiment import ( + TensorboardExperiment, +) +from .tensorboard_run import ( + TensorboardRun, +) +from .tensorboard_service import ( + BatchCreateTensorboardRunsRequest, + BatchCreateTensorboardRunsResponse, + BatchCreateTensorboardTimeSeriesRequest, + BatchCreateTensorboardTimeSeriesResponse, + BatchReadTensorboardTimeSeriesDataRequest, 
+ BatchReadTensorboardTimeSeriesDataResponse, + CreateTensorboardExperimentRequest, + CreateTensorboardOperationMetadata, + CreateTensorboardRequest, + CreateTensorboardRunRequest, + CreateTensorboardTimeSeriesRequest, + DeleteTensorboardExperimentRequest, + DeleteTensorboardRequest, + DeleteTensorboardRunRequest, + DeleteTensorboardTimeSeriesRequest, + ExportTensorboardTimeSeriesDataRequest, + ExportTensorboardTimeSeriesDataResponse, + GetTensorboardExperimentRequest, + GetTensorboardRequest, + GetTensorboardRunRequest, + GetTensorboardTimeSeriesRequest, + ListTensorboardExperimentsRequest, + ListTensorboardExperimentsResponse, + ListTensorboardRunsRequest, + ListTensorboardRunsResponse, + ListTensorboardsRequest, + ListTensorboardsResponse, + ListTensorboardTimeSeriesRequest, + ListTensorboardTimeSeriesResponse, + ReadTensorboardBlobDataRequest, + ReadTensorboardBlobDataResponse, + ReadTensorboardTimeSeriesDataRequest, + ReadTensorboardTimeSeriesDataResponse, + UpdateTensorboardExperimentRequest, + UpdateTensorboardOperationMetadata, + UpdateTensorboardRequest, + UpdateTensorboardRunRequest, + UpdateTensorboardTimeSeriesRequest, + WriteTensorboardExperimentDataRequest, + WriteTensorboardExperimentDataResponse, + WriteTensorboardRunDataRequest, + WriteTensorboardRunDataResponse, +) +from .tensorboard_time_series import ( + TensorboardTimeSeries, +) +from .training_pipeline import ( + FilterSplit, + FractionSplit, + InputDataConfig, + PredefinedSplit, + StratifiedSplit, + TimestampSplit, + TrainingPipeline, +) +from .types import ( + BoolArray, + DoubleArray, + Int64Array, + StringArray, +) +from .unmanaged_container_model import ( + UnmanagedContainerModel, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) +from .vizier_service import ( + AddTrialMeasurementRequest, + CheckTrialEarlyStoppingStateMetatdata, + CheckTrialEarlyStoppingStateRequest, + CheckTrialEarlyStoppingStateResponse, + CompleteTrialRequest, + 
CreateStudyRequest, + CreateTrialRequest, + DeleteStudyRequest, + DeleteTrialRequest, + GetStudyRequest, + GetTrialRequest, + ListOptimalTrialsRequest, + ListOptimalTrialsResponse, + ListStudiesRequest, + ListStudiesResponse, + ListTrialsRequest, + ListTrialsResponse, + LookupStudyRequest, + StopTrialRequest, + SuggestTrialsMetadata, + SuggestTrialsRequest, + SuggestTrialsResponse, +) + +__all__ = ( + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'PredictRequestResponseLoggingConfig', + 'PrivateEndpoints', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'BlurBaselineConfig', + 'Examples', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + 'Explanation', + 
'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 'IntegratedGradientsAttribution', + 'ModelExplanation', + 'Neighbor', + 'Presets', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 
'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'MutateDeployedIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 
'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'NfsMount', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteArtifactRequest', + 'DeleteContextRequest', + 'DeleteExecutionRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'PurgeArtifactsMetadata', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeContextsMetadata', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeExecutionsMetadata', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 
'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ImportModelEvaluationRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 'ListModelVersionsResponse', + 'MergeVersionAliasesRequest', + 'UpdateExplanationDatasetOperationMetadata', + 'UpdateExplanationDatasetRequest', + 'UpdateExplanationDatasetResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineFailurePolicy', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'PipelineTemplateMetadata', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 
'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 'RawPredictRequest', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 
'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 'UpdateTensorboardTimeSeriesRequest', + 'WriteTensorboardExperimentDataRequest', + 'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'StratifiedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UnmanagedContainerModel', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py new file mode 100644 index 0000000000..3fb9cac428 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'AcceleratorType', + }, +) + + +class AcceleratorType(proto.Enum): + r"""Represents a hardware accelerator type.""" + ACCELERATOR_TYPE_UNSPECIFIED = 0 + NVIDIA_TESLA_K80 = 1 + NVIDIA_TESLA_P100 = 2 + NVIDIA_TESLA_V100 = 3 + NVIDIA_TESLA_P4 = 4 + NVIDIA_TESLA_T4 = 5 + NVIDIA_TESLA_A100 = 8 + TPU_V2 = 6 + TPU_V3 = 7 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py new file mode 100644 index 0000000000..86fa41091f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import user_action_reference +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Annotation', + }, +) + + +class Annotation(proto.Message): + r"""Used to assign specific AnnotationSpec to a particular area + of a DataItem or the whole part of the DataItem. + + Attributes: + name (str): + Output only. Resource name of the Annotation. + payload_schema_uri (str): + Required. Google Cloud Storage URI points to a YAML file + describing + [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. + The schema is defined as an `OpenAPI 3.0.2 Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with the + parent Dataset's + [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. + payload (google.protobuf.struct_pb2.Value): + Required. The schema of the payload can be found in + [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + annotation_source (google.cloud.aiplatform_v1beta1.types.UserActionReference): + Output only. The source of the Annotation. + labels (Mapping[str, str]): + Optional. The labels with user-defined metadata to organize + your Annotations. 
+ + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Annotation(System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Annotation: + + - "aiplatform.googleapis.com/annotation_set_name": + optional, name of the UI's annotation set this Annotation + belongs to. If not set, the Annotation is not visible in + the UI. + + - "aiplatform.googleapis.com/payload_schema": output only, + its value is the + [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri] + title. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + payload_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + payload = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + annotation_source = proto.Field( + proto.MESSAGE, + number=5, + message=user_action_reference.UserActionReference, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py new file mode 100644 index 0000000000..30b20f277b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'AnnotationSpec', + }, +) + + +class AnnotationSpec(proto.Message): + r"""Identifies a concept with which DataItems may be annotated + with. + + Attributes: + name (str): + Output only. Resource name of the + AnnotationSpec. + display_name (str): + Required. The user-defined name of the + AnnotationSpec. The name can be up to 128 + characters long and can be consist of any UTF-8 + characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + AnnotationSpec was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when AnnotationSpec + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py new file mode 100644 index 0000000000..ac5ec47310 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Artifact', + }, +) + + +class Artifact(proto.Message): + r"""Instance of a general artifact. + + Attributes: + name (str): + Output only. The resource name of the + Artifact. + display_name (str): + User provided display name of the Artifact. + May be up to 128 Unicode characters. + uri (str): + The uniform resource identifier of the + artifact file. 
May be empty if there is no + actual artifact file. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Artifacts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Artifact (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + last updated. + state (google.cloud.aiplatform_v1beta1.types.Artifact.State): + The state of this Artifact. This is a + property of the Artifact, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex AI + Pipelines), and the system does not prescribe or + check the validity of state transitions. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Artifact. + The size of this field should not exceed 200KB. 
+ description (str): + Description of the Artifact + """ + class State(proto.Enum): + r"""Describes the state of the Artifact.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + LIVE = 2 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + uri = proto.Field( + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=13, + enum=State, + ) + schema_title = proto.Field( + proto.STRING, + number=14, + ) + schema_version = proto.Field( + proto.STRING, + number=15, + ) + metadata = proto.Field( + proto.MESSAGE, + number=16, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=17, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py new file mode 100644 index 0000000000..176f03aead --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -0,0 +1,527 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import completion_stats as gca_completion_stats +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model as gca_unmanaged_container_model +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'BatchPredictionJob', + }, +) + + +class BatchPredictionJob(proto.Message): + r"""A job that uses a + [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to + produce predictions on multiple [input + instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + If predictions for significant portion of the instances fail, the + job may finish without attempting predictions for all remaining + instances. + + Attributes: + name (str): + Output only. Resource name of the + BatchPredictionJob. + display_name (str): + Required. The user-defined name of this + BatchPredictionJob. + model (str): + The name of the Model resoure that produces the predictions + via this job, must share the same ancestor Location. + Starting this job has no impact on any existing deployments + of the Model and their resources. Exactly one of model and + unmanaged_container_model must be set. 
+ + The model resource name may contain version id or version + alias to specify the version, if no version is specified, + the default version will be used. + model_version_id (str): + Output only. The version ID of the Model that + produces the predictions via this job. + unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel): + Contains model information necessary to perform batch + prediction without requiring uploading to model registry. + Exactly one of model and unmanaged_container_model must be + set. + input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig): + Required. Input configuration of the instances on which + predictions are performed. The schema of any single instance + may be specified via the + [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + model_parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the predictions. The schema of + the parameters may be specified via the + [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig): + Required. The Configuration specifying where output + predictions should be written. 
The schema of any single + prediction may be specified as a concatenation of + [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + and + [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. + dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources): + The config of resources used by the Model during the batch + prediction. If the Model + [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types] + DEDICATED_RESOURCES this config may be provided (and the job + will use these resources), if the Model doesn't support + AUTOMATIC_RESOURCES, this config must be provided. + service_account (str): + The service account that the DeployedModel's container runs + as. If not specified, a system generated one will be used, + which has minimal permissions and the custom container, if + used, may not have enough permission to access other GCP + resources. + + Users deploying the Model must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters): + Immutable. Parameters configuring the batch behavior. + Currently only applicable when + [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] + are used (in other cases Vertex AI does the tuning itself). + generate_explanation (bool): + Generate explanation with the batch prediction results. + + When set to ``true``, the batch prediction output changes + based on the ``predictions_format`` field of the + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] + object: + + - ``bigquery``: output includes a column named + ``explanation``. 
The value is a struct that conforms to + the + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] + object. + - ``jsonl``: The JSON objects on each line include an + additional entry keyed ``explanation``. The value of the + entry is a JSON object that conforms to the + [Explanation][google.cloud.aiplatform.v1beta1.Explanation] + object. + - ``csv``: Generating explanations for CSV format is not + supported. + + If this field is set to true, either the + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + or + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + must be populated. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Explanation configuration for this BatchPredictionJob. Can + be specified only if + [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] + is set to ``true``. + + This value overrides the value of + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. + All fields of + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + are optional in the request. If a field of the + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + object is not populated, the corresponding field of the + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + object is inherited. + output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo): + Output only. Information further describing + the output of this job. + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the job. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. 
+ For example, single files that can't be read. + This field never exceeds 20 entries. + Status details fields contain standard GCP error + details. + resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed): + Output only. Information about resources that + had been consumed by this job. Provided in real + time at best effort basis, as well as a final + value once the job completes. + + Note: This field currently may be not populated + for batch predictions that use AutoML Models. + completion_stats (google.cloud.aiplatform_v1beta1.types.CompletionStats): + Output only. Statistics on completed and + failed prediction instances. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob for the first + time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob entered any of + the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was most recently updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize BatchPredictionJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key options for a + BatchPredictionJob. If this is set, then all + resources created by the BatchPredictionJob will + be encrypted with the provided encryption key. 
+ """ + + class InputConfig(proto.Message): + r"""Configures the input to + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + See + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] + for Model's supported input formats, and how instances should be + expressed via any of them. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Cloud Storage location for the input + instances. + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + The BigQuery location of the input table. + The schema of the table should be in the format + described by the given context OpenAPI Schema, + if one is provided. The table may contain + additional columns that are not described by the + schema, and they will be ignored. + + This field is a member of `oneof`_ ``source``. + instances_format (str): + Required. The format in which instances are given, must be + one of the + [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + instances_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputConfig(proto.Message): + r"""Configures the output of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. 
+ See
+ [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]
+ for supported output formats, and how predictions are expressed via
+ any of them.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
+ The Cloud Storage location of the directory where the output
+ is to be written to. In the given directory a new directory
+ is created. Its name is
+ ``prediction-<model-display-name>-<job-create-time>``, where
+ timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+ Inside of it files ``predictions_0001.<extension>``,
+ ``predictions_0002.<extension>``, ...,
+ ``predictions_N.<extension>`` are created where
+ ``<extension>`` depends on chosen
+ [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format],
+ and N may equal 0001 and depends on the total number of
+ successfully predicted instances. If the Model has both
+ [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+ and
+ [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
+ schemata defined then each such file contains predictions as
+ per the
+ [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format].
+ If prediction for any instance failed (partially or
+ completely), then an additional ``errors_0001.<extension>``,
+ ``errors_0002.<extension>``,..., ``errors_N.<extension>``
+ files are created (N depends on total number of failed
+ predictions).
These files contain the failed instances, as
+ per their schema, followed by an additional ``error`` field
+ which as value has [google.rpc.Status][google.rpc.Status]
+ containing only ``code`` and ``message`` fields.
+
+ This field is a member of `oneof`_ ``destination``.
+ bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination):
+ The BigQuery project or dataset location where the output is
+ to be written to. If project is provided, a new dataset is
+ created with name
+ ``prediction_<model-display-name>_<job-create-time>`` where
+ <model-display-name> is made BigQuery-dataset-name compatible (for example, most
+ special characters become underscores), and timestamp is in
+ YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
+ dataset two tables will be created, ``predictions``, and
+ ``errors``. If the Model has both
+ [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+ and
+ [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
+ schemata defined then the tables have columns as follows:
+ The ``predictions`` table contains instances for which the
+ prediction succeeded, it has columns as per a concatenation
+ of the Model's instance and prediction schemata. The
+ ``errors`` table contains rows for which the prediction has
+ failed, it has instance columns, as per the instance schema,
+ followed by a single "errors" column, which as values has
+ [google.rpc.Status][google.rpc.Status] represented as a
+ STRUCT, and containing only ``code`` and ``message``.
+
+ This field is a member of `oneof`_ ``destination``.
+ predictions_format (str):
+ Required. The format in which Vertex AI gives the
+ predictions, must be one of the
+ [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+ [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
+ """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.BigQueryDestination, + ) + predictions_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputInfo(proto.Message): + r"""Further describes this job's output. Supplements + [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_output_directory (str): + Output only. The full path of the Cloud + Storage directory created, into which the + prediction output is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_dataset (str): + Output only. The path of the BigQuery dataset created, in + ``bq://projectId.bqDatasetId`` format, into which the + prediction output is written. + + This field is a member of `oneof`_ ``output_location``. + bigquery_output_table (str): + Output only. The name of the BigQuery table created, in + ``predictions_`` format, into which the + prediction output is written. Can be used by UI to generate + the BigQuery output path, for example. 
+ """ + + gcs_output_directory = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + bigquery_output_table = proto.Field( + proto.STRING, + number=4, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + model_version_id = proto.Field( + proto.STRING, + number=30, + ) + unmanaged_container_model = proto.Field( + proto.MESSAGE, + number=28, + message=gca_unmanaged_container_model.UnmanagedContainerModel, + ) + input_config = proto.Field( + proto.MESSAGE, + number=4, + message=InputConfig, + ) + model_parameters = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + output_config = proto.Field( + proto.MESSAGE, + number=6, + message=OutputConfig, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.BatchDedicatedResources, + ) + service_account = proto.Field( + proto.STRING, + number=29, + ) + manual_batch_tuning_parameters = proto.Field( + proto.MESSAGE, + number=8, + message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, + ) + generate_explanation = proto.Field( + proto.BOOL, + number=23, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=25, + message=explanation.ExplanationSpec, + ) + output_info = proto.Field( + proto.MESSAGE, + number=9, + message=OutputInfo, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + error = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=status_pb2.Status, + ) + resources_consumed = proto.Field( + proto.MESSAGE, + number=13, + message=machine_resources.ResourcesConsumed, + ) + completion_stats = proto.Field( + proto.MESSAGE, + number=14, + 
message=gca_completion_stats.CompletionStats, + ) + create_time = proto.Field( + proto.MESSAGE, + number=15, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=16, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=18, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=19, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py new file mode 100644 index 0000000000..027a67b940 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CompletionStats', + }, +) + + +class CompletionStats(proto.Message): + r"""Success and error statistics of processing multiple entities + (for example, DataItems or structured data rows) in batch. 
+ + Attributes: + successful_count (int): + Output only. The number of entities that had + been processed successfully. + failed_count (int): + Output only. The number of entities for which + any error was encountered. + incomplete_count (int): + Output only. In cases when enough errors are + encountered a job, pipeline, or operation may be + failed as a whole. Below is the number of + entities for which the processing had not been + finished (either in successful or failed state). + Set to -1 if the number is unknown (for example, + the operation failed before the total entity + number could be collected). + successful_forecast_point_count (int): + Output only. The number of the successful + forecast points that are generated by the + forecasting model. This is ONLY used by the + forecasting batch prediction. + """ + + successful_count = proto.Field( + proto.INT64, + number=1, + ) + failed_count = proto.Field( + proto.INT64, + number=2, + ) + incomplete_count = proto.Field( + proto.INT64, + number=3, + ) + successful_forecast_point_count = proto.Field( + proto.INT64, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py new file mode 100644 index 0000000000..207d7cb0d6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Context', + }, +) + + +class Context(proto.Message): + r"""Instance of a general context. + + Attributes: + name (str): + Output only. The resource name of the + Context. + display_name (str): + User provided display name of the Context. + May be up to 128 Unicode characters. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Contexts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Context (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + last updated. + parent_contexts (Sequence[str]): + Output only. A list of resource names of Contexts that are + parents of this Context. A Context may have at most 10 + parent_contexts. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in schema_name to use. 
+ + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Context. + The size of this field should not exceed 200KB. + description (str): + Description of the Context + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + parent_contexts = proto.RepeatedField( + proto.STRING, + number=12, + ) + schema_title = proto.Field( + proto.STRING, + number=13, + ) + schema_version = proto.Field( + proto.STRING, + number=14, + ) + metadata = proto.Field( + proto.MESSAGE, + number=15, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py new file mode 100644 index 0000000000..e0827f6f4e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CustomJob', + 'CustomJobSpec', + 'WorkerPoolSpec', + 'ContainerSpec', + 'PythonPackageSpec', + 'Scheduling', + }, +) + + +class CustomJob(proto.Message): + r"""Represents a job that runs custom workloads such as a Docker + container or a Python package. A CustomJob can have multiple + worker pools and each worker pool can have its own machine and + input spec. A CustomJob will be cleaned up once the job enters + terminal state (failed or succeeded). + + Attributes: + name (str): + Output only. Resource name of a CustomJob. + display_name (str): + Required. The display name of the CustomJob. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): + Required. Job spec. + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the job. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Time when the CustomJob was
+ created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob for the first time
+ entered the ``JOB_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob entered any of the
+ following states: ``JOB_STATE_SUCCEEDED``,
+ ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob was most
+ recently updated.
+ error (google.rpc.status_pb2.Status):
+ Output only. Only populated when job's state is
+ ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``.
+ labels (Mapping[str, str]):
+ The labels with user-defined metadata to
+ organize CustomJobs.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+ Customer-managed encryption key options for a
+ CustomJob. If this is set, then all resources
+ created by the CustomJob will be encrypted with
+ the provided encryption key.
+ web_access_uris (Mapping[str, str]):
+ Output only. URIs for accessing `interactive
+ shells <https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell>`__
+ (one URI for each training node). Only available if
+ [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access]
+ is ``true``.
+
+ The keys are names of each node in the training job; for
+ example, ``workerpool0-0`` for the primary node,
+ ``workerpool1-0`` for the first node in the second worker
+ pool, and ``workerpool1-1`` for the second node in the
+ second worker pool.
+
+ The values are the URIs for each node's interactive shell.
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + job_spec = proto.Field( + proto.MESSAGE, + number=4, + message='CustomJobSpec', + ) + state = proto.Field( + proto.ENUM, + number=5, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_encryption_spec.EncryptionSpec, + ) + web_access_uris = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + + +class CustomJobSpec(proto.Message): + r"""Represents the spec of a CustomJob. + Next Id: 14 + + Attributes: + worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]): + Required. The spec of the worker pools + including machine type and Docker image. All + worker pools except the first one are optional + and can be skipped by providing an empty value. + scheduling (google.cloud.aiplatform_v1beta1.types.Scheduling): + Scheduling options for a CustomJob. + service_account (str): + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this + run-as account. If unspecified, the `Vertex AI Custom Code + Service + Agent `__ + for the CustomJob's project is used. + network (str): + The full name of the Compute Engine + `network `__ + to which the Job should be peered. For example, + ``projects/12345/global/networks/myVPC``. 
+ `Format <https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert>`__
+ is of the form
+ ``projects/{project}/global/networks/{network}``. Where
+ {project} is a project number, as in ``12345``, and
+ {network} is a network name.
+
+ To specify this field, you must have already `configured VPC
+ Network Peering for Vertex
+ AI <https://cloud.google.com/vertex-ai/docs/general/vpc-peering>`__.
+
+ If this field is left unspecified, the job is not peered
+ with any network.
+ reserved_ip_ranges (Sequence[str]):
+ Optional. A list of names for the reserved ip ranges under
+ the VPC network that can be used for this job.
+
+ If set, we will deploy the job within the provided ip
+ ranges. Otherwise, the job will be deployed to any ip ranges
+ under the provided VPC network.
+
+ Example: ['vertex-ai-ip-range'].
+ base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination):
+ The Cloud Storage location to store the output of this
+ CustomJob or HyperparameterTuningJob. For
+ HyperparameterTuningJob, the baseOutputDirectory of each
+ child CustomJob backing a Trial is set to a subdirectory of
+ name [id][google.cloud.aiplatform.v1beta1.Trial.id] under
+ its parent HyperparameterTuningJob's baseOutputDirectory.
+
+ The following Vertex AI environment variables will be passed
+ to containers or python modules when this field is set:
+
+ For CustomJob:
+
+ - AIP_MODEL_DIR = ``<base_output_directory>/model/``
+ - AIP_CHECKPOINT_DIR =
+ ``<base_output_directory>/checkpoints/``
+ - AIP_TENSORBOARD_LOG_DIR =
+ ``<base_output_directory>/logs/``
+
+ For CustomJob backing a Trial of HyperparameterTuningJob:
+
+ - AIP_MODEL_DIR =
+ ``<base_output_directory>/<trial_id>/model/``
+ - AIP_CHECKPOINT_DIR =
+ ``<base_output_directory>/<trial_id>/checkpoints/``
+ - AIP_TENSORBOARD_LOG_DIR =
+ ``<base_output_directory>/<trial_id>/logs/``
+ tensorboard (str):
+ Optional. The name of a Vertex AI
+ [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
+ resource to which this CustomJob will upload Tensorboard
+ logs. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
+ enable_web_access (bool):
+ Optional. Whether you want Vertex AI to enable `interactive
+ shell
+ access <https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell>`__
+ to training containers.
+ + If set to ``true``, you can access interactive shells at the + URIs given by + [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris] + or + [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris] + (within + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]). + """ + + worker_pool_specs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkerPoolSpec', + ) + scheduling = proto.Field( + proto.MESSAGE, + number=3, + message='Scheduling', + ) + service_account = proto.Field( + proto.STRING, + number=4, + ) + network = proto.Field( + proto.STRING, + number=5, + ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=13, + ) + base_output_directory = proto.Field( + proto.MESSAGE, + number=6, + message=io.GcsDestination, + ) + tensorboard = proto.Field( + proto.STRING, + number=7, + ) + enable_web_access = proto.Field( + proto.BOOL, + number=10, + ) + + +class WorkerPoolSpec(proto.Message): + r"""Represents the spec of a worker pool in a job. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + container_spec (google.cloud.aiplatform_v1beta1.types.ContainerSpec): + The custom container task. + + This field is a member of `oneof`_ ``task``. + python_package_spec (google.cloud.aiplatform_v1beta1.types.PythonPackageSpec): + The Python packaged task. + + This field is a member of `oneof`_ ``task``. + machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): + Optional. Immutable. The specification of a + single machine. + replica_count (int): + Optional. The number of worker replicas to + use for this worker pool. 
+ nfs_mounts (Sequence[google.cloud.aiplatform_v1beta1.types.NfsMount]): + Optional. List of NFS mount spec. + disk_spec (google.cloud.aiplatform_v1beta1.types.DiskSpec): + Disk spec. + """ + + container_spec = proto.Field( + proto.MESSAGE, + number=6, + oneof='task', + message='ContainerSpec', + ) + python_package_spec = proto.Field( + proto.MESSAGE, + number=7, + oneof='task', + message='PythonPackageSpec', + ) + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message=machine_resources.MachineSpec, + ) + replica_count = proto.Field( + proto.INT64, + number=2, + ) + nfs_mounts = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=machine_resources.NfsMount, + ) + disk_spec = proto.Field( + proto.MESSAGE, + number=5, + message=machine_resources.DiskSpec, + ) + + +class ContainerSpec(proto.Message): + r"""The spec of a Container. + + Attributes: + image_uri (str): + Required. The URI of a container image in the + Container Registry that is to be run on each + worker replica. + command (Sequence[str]): + The command to be invoked when the container + is started. It overrides the entrypoint + instruction in Dockerfile when provided. + args (Sequence[str]): + The arguments to be passed when starting the + container. + env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): + Environment variables to be passed to the + container. Maximum limit is 100. + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + + +class PythonPackageSpec(proto.Message): + r"""The spec of a Python packaged code. + + Attributes: + executor_image_uri (str): + Required. The URI of a container image in Artifact Registry + that will run the provided Python package. 
Vertex AI
+ provides a wide range of executor images with pre-installed
+ packages to meet users' various use cases. See the list of
+ `pre-built containers for
+ training <https://cloud.google.com/vertex-ai/docs/training/pre-built-containers>`__.
+ You must use an image from this list.
+ package_uris (Sequence[str]):
+ Required. The Google Cloud Storage location
+ of the Python package files which are the
+ training program and its dependent packages. The
+ maximum number of package URIs is 100.
+ python_module (str):
+ Required. The Python module name to run after
+ installing the packages.
+ args (Sequence[str]):
+ Command line arguments to be passed to the
+ Python task.
+ env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]):
+ Environment variables to be passed to the
+ python module. Maximum limit is 100.
+ """
+
+ executor_image_uri = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ package_uris = proto.RepeatedField(
+ proto.STRING,
+ number=2,
+ )
+ python_module = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ args = proto.RepeatedField(
+ proto.STRING,
+ number=4,
+ )
+ env = proto.RepeatedField(
+ proto.MESSAGE,
+ number=5,
+ message=env_var.EnvVar,
+ )
+
+
+class Scheduling(proto.Message):
+ r"""All parameters related to queuing and scheduling of custom
+ jobs.
+
+ Attributes:
+ timeout (google.protobuf.duration_pb2.Duration):
+ The maximum job running time. The default is
+ 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
+ """ + + timeout = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + restart_job_on_worker_restart = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py new file mode 100644 index 0000000000..17737a45b3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DataItem', + }, +) + + +class DataItem(proto.Message): + r"""A piece of data in a Dataset. Could be an image, a video, a + document or plain text. + + Attributes: + name (str): + Output only. The resource name of the + DataItem. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + last updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your DataItems. 
+ Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one DataItem(System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + payload (google.protobuf.struct_pb2.Value): + Required. The data that the DataItem represents (for + example, an image or a text snippet). The schema of the + payload is stored in the parent Dataset's [metadata + schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + dataItemSchemaUri field. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + payload = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py new file mode 100644 index 0000000000..3ec4f31c9e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import job_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DataLabelingJob', + 'ActiveLearningConfig', + 'SampleConfig', + 'TrainingConfig', + }, +) + + +class DataLabelingJob(proto.Message): + r"""DataLabelingJob is used to trigger a human labeling job on + unlabeled data from the following Dataset: + + Attributes: + name (str): + Output only. Resource name of the + DataLabelingJob. + display_name (str): + Required. The user-defined name of the + DataLabelingJob. The name can be up to 128 + characters long and can be consist of any UTF-8 + characters. + Display name of a DataLabelingJob. + datasets (Sequence[str]): + Required. Dataset resource names. Right now we only support + labeling from a single Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + annotation_labels (Mapping[str, str]): + Labels to assign to annotations generated by + this DataLabelingJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. 
See https://goo.gl/xmQnxf for more + information and examples of labels. System + reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + labeler_count (int): + Required. Number of labelers to work on each + DataItem. + instruction_uri (str): + Required. The Google Cloud Storage location + of the instruction pdf. This pdf is shared with + labelers, and provides detailed description on + how to label DataItems in Datasets. + inputs_schema_uri (str): + Required. Points to a YAML file stored on + Google Cloud Storage describing the config for a + specific type of DataLabelingJob. The schema + files that can be used here are found in the + https://storage.googleapis.com/google-cloud-aiplatform + bucket in the /schema/datalabelingjob/inputs/ + folder. + inputs (google.protobuf.struct_pb2.Value): + Required. Input config parameters for the + DataLabelingJob. + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the job. + labeling_progress (int): + Output only. Current labeling job progress percentage scaled + in interval [0, 100], indicating the percentage of DataItems + that has been finished. + current_spend (google.type.money_pb2.Money): + Output only. Estimated cost(in US dollars) + that the DataLabelingJob has incurred to date. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + DataLabelingJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + DataLabelingJob was updated most recently. + error (google.rpc.status_pb2.Status): + Output only. DataLabelingJob errors. It is only populated + when job's state is ``JOB_STATE_FAILED`` or + ``JOB_STATE_CANCELLED``. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + DataLabelingJobs. 
+ + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each DataLabelingJob: + + - "aiplatform.googleapis.com/schema": output only, its + value is the + [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s + title. + specialist_pools (Sequence[str]): + The SpecialistPools' resource names + associated with this job. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + DataLabelingJob. If set, this DataLabelingJob + will be secured by this key. + Note: Annotations created in the DataLabelingJob + are associated with the EncryptionSpec of the + Dataset they are exported to. + active_learning_config (google.cloud.aiplatform_v1beta1.types.ActiveLearningConfig): + Parameters that configure the active learning + pipeline. Active learning will label the data + incrementally via several iterations. For every + iteration, it will select a batch of data based + on the sampling strategy. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + datasets = proto.RepeatedField( + proto.STRING, + number=3, + ) + annotation_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + labeler_count = proto.Field( + proto.INT32, + number=4, + ) + instruction_uri = proto.Field( + proto.STRING, + number=5, + ) + inputs_schema_uri = proto.Field( + proto.STRING, + number=6, + ) + inputs = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Value, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=job_state.JobState, + ) + labeling_progress = proto.Field( + proto.INT32, + number=13, + ) + current_spend = proto.Field( + proto.MESSAGE, + number=14, + message=money_pb2.Money, + ) + create_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=22, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + specialist_pools = proto.RepeatedField( + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=20, + message=gca_encryption_spec.EncryptionSpec, + ) + active_learning_config = proto.Field( + proto.MESSAGE, + number=21, + message='ActiveLearningConfig', + ) + + +class ActiveLearningConfig(proto.Message): + r"""Parameters that configure the active learning pipeline. + Active learning will label the data incrementally by several + iterations. For every iteration, it will select a batch of data + based on the sampling strategy. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + max_data_item_count (int): + Max number of human labeled DataItems. + + This field is a member of `oneof`_ ``human_labeling_budget``. + max_data_item_percentage (int): + Max percent of total DataItems for human + labeling. + + This field is a member of `oneof`_ ``human_labeling_budget``. + sample_config (google.cloud.aiplatform_v1beta1.types.SampleConfig): + Active learning data sampling config. For + every active learning labeling iteration, it + will select a batch of data based on the + sampling strategy. + training_config (google.cloud.aiplatform_v1beta1.types.TrainingConfig): + CMLE training config. For every active + learning labeling iteration, system will train a + machine learning model on CMLE. The trained + model will be used by data sampling algorithm to + select DataItems. + """ + + max_data_item_count = proto.Field( + proto.INT64, + number=1, + oneof='human_labeling_budget', + ) + max_data_item_percentage = proto.Field( + proto.INT32, + number=2, + oneof='human_labeling_budget', + ) + sample_config = proto.Field( + proto.MESSAGE, + number=3, + message='SampleConfig', + ) + training_config = proto.Field( + proto.MESSAGE, + number=4, + message='TrainingConfig', + ) + + +class SampleConfig(proto.Message): + r"""Active learning data sampling config. For every active + learning labeling iteration, it will select a batch of data + based on the sampling strategy. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + initial_batch_sample_percentage (int): + The percentage of data needed to be labeled + in the first batch. + + This field is a member of `oneof`_ ``initial_batch_sample_size``. + following_batch_sample_percentage (int): + The percentage of data needed to be labeled + in each following batch (except the first + batch). 
+ + This field is a member of `oneof`_ ``following_batch_sample_size``. + sample_strategy (google.cloud.aiplatform_v1beta1.types.SampleConfig.SampleStrategy): + Field to choose sampling strategy. Sampling + strategy will decide which data should be + selected for human labeling in every batch. + """ + class SampleStrategy(proto.Enum): + r"""Sample strategy decides which subset of DataItems should be + selected for human labeling in every batch. + """ + SAMPLE_STRATEGY_UNSPECIFIED = 0 + UNCERTAINTY = 1 + + initial_batch_sample_percentage = proto.Field( + proto.INT32, + number=1, + oneof='initial_batch_sample_size', + ) + following_batch_sample_percentage = proto.Field( + proto.INT32, + number=3, + oneof='following_batch_sample_size', + ) + sample_strategy = proto.Field( + proto.ENUM, + number=5, + enum=SampleStrategy, + ) + + +class TrainingConfig(proto.Message): + r"""CMLE training config. For every active learning labeling + iteration, system will train a machine learning model on CMLE. + The trained model will be used by data sampling algorithm to + select DataItems. + + Attributes: + timeout_training_milli_hours (int): + The timeout hours for the CMLE training job, + expressed in milli hours i.e. 1,000 value in + this field means 1 hour. + """ + + timeout_training_milli_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py new file mode 100644 index 0000000000..18ea4dd8a1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import io +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Dataset', + 'ImportDataConfig', + 'ExportDataConfig', + }, +) + + +class Dataset(proto.Message): + r"""A collection of DataItems and Annotations on them. + + Attributes: + name (str): + Output only. The resource name of the + Dataset. + display_name (str): + Required. The user-defined name of the + Dataset. The name can be up to 128 characters + long and can be consist of any UTF-8 characters. + description (str): + Optional. The description of the Dataset. + metadata_schema_uri (str): + Required. Points to a YAML file stored on + Google Cloud Storage describing additional + information about the Dataset. The schema is + defined as an OpenAPI 3.0.2 Schema Object. The + schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/metadata/. + metadata (google.protobuf.struct_pb2.Value): + Required. Additional information about the + Dataset. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + last updated. + etag (str): + Used to perform consistent read-modify-write + updates. 
If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + output only, its value is the + [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + title. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Dataset. If set, this Dataset and all + sub-resources of this Dataset will be secured by + this key. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + metadata = proto.Field( + proto.MESSAGE, + number=8, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=6, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=11, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class ImportDataConfig(proto.Message): + r"""Describes the location from where we import data into a + Dataset, together with the labels that will be applied to the + DataItems and the Annotations. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Google Cloud Storage location for the + input content. + + This field is a member of `oneof`_ ``source``. + data_item_labels (Mapping[str, str]): + Labels that will be applied to newly imported DataItems. If + an identical DataItem as one being imported already exists + in the Dataset, then these labels will be appended to these + of the already existing one, and if labels with identical + key is imported before, the old label value will be + overwritten. If two DataItems are identical in the same + import data operation, the labels will be combined and if + key collision happens in this case, one of the values will + be picked randomly. Two DataItems are considered identical + if their content bytes are identical (e.g. image bytes or + pdf bytes). 
These labels will be overridden by Annotation + labels specified inside index file referenced by + [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], + e.g. jsonl file. + import_schema_uri (str): + Required. Points to a YAML file stored on Google Cloud + Storage describing the import format. Validation will be + done against the schema. The schema is defined as an + `OpenAPI 3.0.2 Schema + Object `__. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message=io.GcsSource, + ) + data_item_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + import_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + + +class ExportDataConfig(proto.Message): + r"""Describes what part of the Dataset is to be exported, the + destination of the export and how to export. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + The Google Cloud Storage location where the output is to be + written to. In the given directory a new directory will be + created with name: + ``export-data--`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All export output will be written into that + directory. Inside that directory, annotations with the same + schema will be grouped into sub directories which are named + with the corresponding annotations' schema title. Inside + these sub directories, a schema.yaml will be created to + describe the output format. + + This field is a member of `oneof`_ ``destination``. + annotations_filter (str): + A filter on Annotations of the Dataset. Only Annotations on + to-be-exported DataItems(specified by [data_items_filter][]) + that match this filter will be exported. The filter syntax + is the same as in + [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. 
+ """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.GcsDestination, + ) + annotations_filter = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py new file mode 100644 index 0000000000..7763d76026 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -0,0 +1,543 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateDatasetRequest', + 'CreateDatasetOperationMetadata', + 'GetDatasetRequest', + 'UpdateDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ImportDataResponse', + 'ImportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'ExportDataOperationMetadata', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'GetAnnotationSpecRequest', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Dataset in. Format: + ``projects/{project}/locations/{location}`` + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + + Attributes: + name (str): + Required. The name of the Dataset resource. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + + Attributes: + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + """ + + dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Attributes: + parent (str): + Required. The name of the Dataset's parent resource. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. 
+ + - ``display_name``: supports = and != + - ``metadata_schema_uri``: supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Attributes: + datasets (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset]): + A list of Datasets that matches the specified + filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + datasets = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. 
+ + Attributes: + name (str): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): + Required. The desired input locations. The + contents of all input locations will be imported + in one batch. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + import_configs = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gca_dataset.ImportDataConfig, + ) + + +class ImportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + """ + + +class ImportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): + Required. The desired output location. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + export_config = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.ExportDataConfig, + ) + + +class ExportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + exported_files (Sequence[str]): + All of the files that are exported in this + export operation. + """ + + exported_files = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ExportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + gcs_output_directory (str): + A Google Cloud Storage directory which path + ends with '/'. The exported data is stored in + the directory. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + +class ListDataItemsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Attributes: + parent (str): + Required. The resource name of the Dataset to list DataItems + from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataItemsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Attributes: + data_items (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem]): + A list of DataItems that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_items = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=data_item.DataItem, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The name of the AnnotationSpec resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListAnnotationsRequest(proto.Message): + r"""Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Attributes: + parent (str): + Required. The resource name of the DataItem to list + Annotations from. 
Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListAnnotationsResponse(proto.Message): + r"""Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Attributes: + annotations (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation]): + A list of Annotations that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. 
+ """ + + @property + def raw_page(self): + return self + + annotations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation.Annotation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py new file mode 100644 index 0000000000..3ed040220f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DeployedIndexRef', + }, +) + + +class DeployedIndexRef(proto.Message): + r"""Points to a DeployedIndex. + + Attributes: + index_endpoint (str): + Immutable. A resource name of the + IndexEndpoint. + deployed_index_id (str): + Immutable. The ID of the DeployedIndex in the + above IndexEndpoint. 
+ """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py new file mode 100644 index 0000000000..e9876ae073 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DeployedModelRef', + }, +) + + +class DeployedModelRef(proto.Message): + r"""Points to a DeployedModel. + + Attributes: + endpoint (str): + Immutable. A resource name of an Endpoint. + deployed_model_id (str): + Immutable. An ID of a DeployedModel in the + above Endpoint. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py new file mode 100644 index 0000000000..240901e591 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'EncryptionSpec', + }, +) + + +class EncryptionSpec(proto.Message): + r"""Represents a customer-managed encryption key spec that can be + applied to a top-level resource. + + Attributes: + kms_key_name (str): + Required. The Cloud KMS resource identifier of the customer + managed encryption key used to protect a resource. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. 
+ """ + + kms_key_name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py new file mode 100644 index 0000000000..daaf0e5d6f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -0,0 +1,428 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Endpoint', + 'DeployedModel', + 'PrivateEndpoints', + 'PredictRequestResponseLoggingConfig', + }, +) + + +class Endpoint(proto.Message): + r"""Models are deployed into it, and afterwards Endpoint is + called to obtain predictions and explanations. + + Attributes: + name (str): + Output only. The resource name of the + Endpoint. + display_name (str): + Required. The display name of the Endpoint. 
+ The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Endpoint. + deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]): + Output only. The models deployed in this Endpoint. To add or + remove DeployedModels use + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] + and + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] + respectively. + traffic_split (Mapping[str, int]): + A map from a DeployedModel's ID to the + percentage of this Endpoint's traffic that + should be forwarded to that DeployedModel. + If a DeployedModel's ID is not listed in this + map, then it receives no traffic. + + The traffic percentage values must add up to + 100, or map must be empty if the Endpoint is to + not accept any traffic at a moment. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Endpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Endpoint was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Endpoint was + last updated. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for an + Endpoint. If set, this Endpoint and all + sub-resources of this Endpoint will be secured + by this key. 
+ network (str): + The full name of the Google Compute Engine + `network `__ + to which the Endpoint should be peered. + + Private services access must already be configured for the + network. If left unspecified, the Endpoint is not peered + with any network. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. + + `Format `__: + ``projects/{project}/global/networks/{network}``. Where + ``{project}`` is a project number, as in ``12345``, and + ``{network}`` is network name. + enable_private_service_connect (bool): + Deprecated: If true, expose the Endpoint via private service + connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. + model_deployment_monitoring_job (str): + Output only. Resource name of the Model Monitoring job + associated with this Endpoint if monitoring is enabled by + [CreateModelDeploymentMonitoringJob][]. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + predict_request_response_logging_config (google.cloud.aiplatform_v1beta1.types.PredictRequestResponseLoggingConfig): + Configures the request-response logging for + online prediction. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='DeployedModel', + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=5, + ) + etag = proto.Field( + proto.STRING, + number=6, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + create_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=10, + message=gca_encryption_spec.EncryptionSpec, + ) + network = proto.Field( + proto.STRING, + number=13, + ) + enable_private_service_connect = proto.Field( + proto.BOOL, + number=17, + ) + model_deployment_monitoring_job = proto.Field( + proto.STRING, + number=14, + ) + predict_request_response_logging_config = proto.Field( + proto.MESSAGE, + number=18, + message='PredictRequestResponseLoggingConfig', + ) + + +class DeployedModel(proto.Message): + r"""A deployment of a Model. Endpoints contain one or more + DeployedModels. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources): + A description of resources that are dedicated + to the DeployedModel, and that need a higher + degree of manual configuration. + + This field is a member of `oneof`_ ``prediction_resources``. 
+ automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): + A description of resources that to large + degree are decided by Vertex AI, and require + only a modest additional configuration. + + This field is a member of `oneof`_ ``prediction_resources``. + id (str): + Immutable. The ID of the DeployedModel. If not provided upon + deployment, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. + model (str): + Required. The resource name of the Model that + this is the deployment of. Note that the Model + may be in a different location than the + DeployedModel's Endpoint. + The resource name may contain version id or + version alias to specify the version, if no + version is specified, the default version will + be deployed. + model_version_id (str): + Output only. The version ID of the model that + is deployed. + display_name (str): + The display name of the DeployedModel. If not provided upon + creation, the Model's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedModel + was created. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Explanation configuration for this DeployedModel. + + When deploying a Model using + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], + this value overrides the value of + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. + All fields of + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + are optional in the request. If a field of + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + is not populated, the value of the same field of + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + is inherited. 
If the corresponding + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + is not populated, all fields of the + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + will be used for the explanation configuration. + service_account (str): + The service account that the DeployedModel's container runs + as. Specify the email address of the service account. If + this service account is not specified, the container runs as + a service account that doesn't have access to the resource + project. + + Users deploying the Model must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + enable_container_logging (bool): + If true, the container of the DeployedModel instances will + send ``stderr`` and ``stdout`` streams to Stackdriver + Logging. + + Only supported for custom-trained Models and AutoML Tabular + Models. + enable_access_logging (bool): + These logs are like standard server access + logs, containing information like timestamp and + latency for each prediction request. + Note that Stackdriver logs may incur a cost, + especially if your project receives prediction + requests at a high queries per second rate + (QPS). Estimate your costs before enabling this + option. + private_endpoints (google.cloud.aiplatform_v1beta1.types.PrivateEndpoints): + Output only. Provide paths for users to send + predict/explain/health requests directly to the deployed + model services running on Cloud via private services access. + This field is populated if + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + is configured. 
+ """ + + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + oneof='prediction_resources', + message=machine_resources.DedicatedResources, + ) + automatic_resources = proto.Field( + proto.MESSAGE, + number=8, + oneof='prediction_resources', + message=machine_resources.AutomaticResources, + ) + id = proto.Field( + proto.STRING, + number=1, + ) + model = proto.Field( + proto.STRING, + number=2, + ) + model_version_id = proto.Field( + proto.STRING, + number=18, + ) + display_name = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=9, + message=explanation.ExplanationSpec, + ) + service_account = proto.Field( + proto.STRING, + number=11, + ) + enable_container_logging = proto.Field( + proto.BOOL, + number=12, + ) + enable_access_logging = proto.Field( + proto.BOOL, + number=13, + ) + private_endpoints = proto.Field( + proto.MESSAGE, + number=14, + message='PrivateEndpoints', + ) + + +class PrivateEndpoints(proto.Message): + r"""PrivateEndpoints proto is used to provide paths for users to send + requests privately. To send request via private service access, use + predict_http_uri, explain_http_uri or health_http_uri. To send + request via private service connect, use service_attachment. + + Attributes: + predict_http_uri (str): + Output only. Http(s) path to send prediction + requests. + explain_http_uri (str): + Output only. Http(s) path to send explain + requests. + health_http_uri (str): + Output only. Http(s) path to send health + check requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
+ """ + + predict_http_uri = proto.Field( + proto.STRING, + number=1, + ) + explain_http_uri = proto.Field( + proto.STRING, + number=2, + ) + health_http_uri = proto.Field( + proto.STRING, + number=3, + ) + service_attachment = proto.Field( + proto.STRING, + number=4, + ) + + +class PredictRequestResponseLoggingConfig(proto.Message): + r"""Configuration for logging request-response to a BigQuery + table. + + Attributes: + enabled (bool): + If logging is enabled or not. + sampling_rate (float): + Percentage of requests to be logged, expressed as a fraction + in range(0,1]. + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + BigQuery table for logging. If only given a project, a new + dataset will be created with name + ``logging__`` where will + be made BigQuery-dataset-name compatible (e.g. most special + characters will become underscores). If no table name is + given, a new table will be created with name + ``request_response_logging`` + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + sampling_rate = proto.Field( + proto.DOUBLE, + number=2, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.BigQueryDestination, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py new file mode 100644 index 0000000000..76eb5bf89c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateEndpointRequest', + 'CreateEndpointOperationMetadata', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UpdateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelRequest', + 'DeployModelResponse', + 'DeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UndeployModelOperationMetadata', + }, +) + + +class CreateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Endpoint in. Format: + ``projects/{project}/locations/{location}`` + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint to create. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become the + final component of the endpoint resource name. If not + provided, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. When using HTTP/JSON, this field is populated + based on a query string argument, such as + ``?endpoint_id=12345``. 
This is the fallback for fields that + are not included in either the URI or the body. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.Endpoint, + ) + endpoint_id = proto.Field( + proto.STRING, + number=4, + ) + + +class CreateEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + + Attributes: + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEndpointsRequest(proto.Message): + r"""Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``endpoint`` supports = and !=. ``endpoint`` represents + the Endpoint ID, i.e. the last segment of the Endpoint's + [resource + name][google.cloud.aiplatform.v1beta1.Endpoint.name]. 
+ - ``display_name`` supports = and, != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] + of the previous + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListEndpointsResponse(proto.Message): + r"""Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Attributes: + endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint]): + List of Endpoints in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + Attributes: + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + + Attributes: + name (str): + Required. The name of the Endpoint resource to be deleted. + Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource into which to + deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. 
Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + traffic_split (Mapping[str, int]): + A map from a DeployedModel's ID to the percentage of this + Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the just + being deployed Model, a "0" should be used, and the actual + ID of the new DeployedModel will be filled in its place by + this method. The traffic percentage values must add up to + 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.DeployedModel, + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + + +class DeployModelResponse(proto.Message): + r"""Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + The DeployedModel that had been deployed in + the Endpoint. + """ + + deployed_model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.DeployedModel, + ) + + +class DeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource from which to + undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model_id (str): + Required. The ID of the DeployedModel to be + undeployed from the Endpoint. + traffic_split (Mapping[str, int]): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is being + undeployed from the Endpoint, the [Endpoint.traffic_split] + will always end up empty when this call returns. A + DeployedModel will be successfully undeployed only if it + doesn't have any traffic assigned to it when this method + executes, or if this field unassigns any traffic to it. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + + +class UndeployModelResponse(proto.Message): + r"""Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + """ + + +class UndeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py new file mode 100644 index 0000000000..11cb91d094 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'EntityType', + }, +) + + +class EntityType(proto.Message): + r"""An entity type is a type of object in a system that needs to + be modeled and have stored information about. For example, + driver is an entity type, and driver0 is an instance of an + entity type driver. + + Attributes: + name (str): + Immutable. Name of the EntityType. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + The last part entity_type is assigned by the client. 
The + entity_type can be up to 64 characters long and can consist + only of ASCII Latin letters A-Z and a-z and underscore(_), + and ASCII digits 0-9 starting with a letter. The value will + be unique given a featurestore. + description (str): + Optional. Description of the EntityType. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your EntityTypes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one EntityType + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Optional. Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): + Optional. The default monitoring configuration for all + Features with value type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 under this EntityType. + + If this is populated with + [FeaturestoreMonitoringConfig.monitoring_interval] + specified, snapshot analysis monitoring is enabled. + Otherwise, snapshot analysis monitoring is disabled. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + monitoring_config = proto.Field( + proto.MESSAGE, + number=8, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py new file mode 100644 index 0000000000..8892da7b70 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'EnvVar', + }, +) + + +class EnvVar(proto.Message): + r"""Represents an environment variable present in a Container or + Python Module. + + Attributes: + name (str): + Required. Name of the environment variable. + Must be a valid C identifier. + value (str): + Required. 
Variables that reference a $(VAR_NAME) are + expanded using the previous defined environment variables in + the container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped + references will never be expanded, regardless of whether the + variable exists or not. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py new file mode 100644 index 0000000000..9211302b38 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Event', + }, +) + + +class Event(proto.Message): + r"""An edge describing the relationship between an Artifact and + an Execution in a lineage graph. + + Attributes: + artifact (str): + Required. The relative resource name of the + Artifact in the Event. + execution (str): + Output only. 
The relative resource name of + the Execution in the Event. + event_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time the Event occurred. + type_ (google.cloud.aiplatform_v1beta1.types.Event.Type): + Required. The type of the Event. + labels (Mapping[str, str]): + The labels with user-defined metadata to + annotate Events. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Event (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + """ + class Type(proto.Enum): + r"""Describes whether an Event's Artifact is the Execution's + input or output. + """ + TYPE_UNSPECIFIED = 0 + INPUT = 1 + OUTPUT = 2 + + artifact = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.STRING, + number=2, + ) + event_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + type_ = proto.Field( + proto.ENUM, + number=4, + enum=Type, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py new file mode 100644 index 0000000000..30540aab5c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Execution', + }, +) + + +class Execution(proto.Message): + r"""Instance of a general execution. + + Attributes: + name (str): + Output only. The resource name of the + Execution. + display_name (str): + User provided display name of the Execution. + May be up to 128 Unicode characters. + state (google.cloud.aiplatform_v1beta1.types.Execution.State): + The state of this Execution. This is a + property of the Execution, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex AI Pipelines) + and the system does not prescribe or check the + validity of state transitions. + etag (str): + An eTag used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Executions. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Execution (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was created. 
+ update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was last updated. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in ``schema_title`` to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. And both are used together as + unique identifiers to identify schemas within the local + metadata store. + metadata (google.protobuf.struct_pb2.Struct): + Properties of the Execution. + The size of this field should not exceed 200KB. + description (str): + Description of the Execution + """ + class State(proto.Enum): + r"""Describes the state of the Execution.""" + STATE_UNSPECIFIED = 0 + NEW = 1 + RUNNING = 2 + COMPLETE = 3 + FAILED = 4 + CACHED = 5 + CANCELLED = 6 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + schema_title = proto.Field( + proto.STRING, + number=13, + ) + schema_version = proto.Field( + proto.STRING, + number=14, + ) + metadata = proto.Field( + proto.MESSAGE, + number=15, + message=struct_pb2.Struct, + ) + description = proto.Field( + proto.STRING, + number=16, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py new file mode 100644 index 0000000000..6ed6468305 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -0,0 +1,947 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Explanation', + 'ModelExplanation', + 'Attribution', + 'Neighbor', + 'ExplanationSpec', + 'ExplanationParameters', + 'SampledShapleyAttribution', + 'IntegratedGradientsAttribution', + 'XraiAttribution', + 'SmoothGradConfig', + 'FeatureNoiseSigma', + 'BlurBaselineConfig', + 'Examples', + 'Presets', + 'ExplanationSpecOverride', + 'ExplanationMetadataOverride', + 'ExamplesOverride', + 'ExamplesRestrictionsNamespace', + }, +) + + +class Explanation(proto.Message): + r"""Explanation of a prediction (provided in + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]) + produced by the Model on a given + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. + + Attributes: + attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): + Output only. 
Feature attributions grouped by predicted + outputs. + + For Models that predict only one output, such as regression + Models that predict only one score, there is only one + attribution that explains the predicted output. For Models + that predict multiple outputs, such as multiclass Models + that predict multiple classes, each element explains one + specific item. + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + can be used to identify which output this attribution is + explaining. + + If users set + [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], + the attributions are sorted by + [instance_output_value][Attributions.instance_output_value] + in descending order. If + [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] + is specified, the attributions are stored by + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + in the same order as they appear in the output_indices. + neighbors (Sequence[google.cloud.aiplatform_v1beta1.types.Neighbor]): + Output only. List of the nearest neighbors + for example-based explanations. + For models deployed with the examples + explanations feature enabled, the attributions + field is empty and instead the neighbors field + is populated. + """ + + attributions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Attribution', + ) + neighbors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='Neighbor', + ) + + +class ModelExplanation(proto.Message): + r"""Aggregated explanation metrics for a Model over a set of + instances. + + Attributes: + mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): + Output only. Aggregated attributions explaining the Model's + prediction outputs over the set of instances. The + attributions are grouped by outputs. 
+ + For Models that predict only one output, such as regression + Models that predict only one score, there is only one + attribution that explains the predicted output. For Models + that predict multiple outputs, such as multiclass Models + that predict multiple classes, each element explains one + specific item. + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + can be used to identify which output this attribution is + explaining. + + The + [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value], + [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] + and + [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + fields are averaged over the test data. + + NOTE: Currently AutoML tabular classification Models produce + only one attribution, which averages attributions over all + the classes it predicts. + [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] + is not populated. + """ + + mean_attributions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Attribution', + ) + + +class Attribution(proto.Message): + r"""Attribution that explains a particular prediction output. + + Attributes: + baseline_output_value (float): + Output only. Model predicted output if the input instance is + constructed from the baselines of all the features defined + in + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + The field name of the output is determined by the key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. + + If the Model's predicted output has multiple dimensions + (rank > 1), this is the value in the output located by + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + + If there are multiple baselines, their output values are + averaged. 
+ instance_output_value (float): + Output only. Model predicted output on the corresponding + [explanation instance][ExplainRequest.instances]. The field + name of the output is determined by the key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. + + If the Model predicted output has multiple dimensions, this + is the value in the output located by + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + feature_attributions (google.protobuf.struct_pb2.Value): + Output only. Attributions of each explained feature. + Features are extracted from the [prediction + instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + according to [explanation metadata for + inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + + The value is a struct, whose keys are the name of the + feature. The values are how much the feature in the + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + contributed to the predicted result. + + The format of the value is determined by the feature's input + format: + + - If the feature is a scalar value, the attribution value + is a [floating + number][google.protobuf.Value.number_value]. + + - If the feature is an array of scalar values, the + attribution value is an + [array][google.protobuf.Value.list_value]. + + - If the feature is a struct, the attribution value is a + [struct][google.protobuf.Value.struct_value]. The keys in + the attribution value struct are the same as the keys in + the feature struct. The formats of the values in the + attribution struct are determined by the formats of the + values in the feature struct. 
+ + The + [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] + field, pointed to by the + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + field of the + [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + object, points to the schema file that describes the + features and their attribution values (if it is populated). + output_index (Sequence[int]): + Output only. The index that locates the explained prediction + output. + + If the prediction output is a scalar value, output_index is + not populated. If the prediction output has multiple + dimensions, the length of the output_index list is the same + as the number of dimensions of the output. The i-th element + in output_index is the element index of the i-th dimension + of the output vector. Indices start from 0. + output_display_name (str): + Output only. The display name of the output identified by + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + For example, the predicted class name by a + multi-classification Model. + + This field is only populated iff the Model predicts display + names as a separate field along with the explained output. + The predicted display name must have the same shape as the + explained output, and can be located using output_index. + approximation_error (float): + Output only. Error of + [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + caused by approximation used in the explanation method. + Lower value means more precise attributions. + + - For Sampled Shapley + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], + increasing + [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] + might reduce the error. 
+ - For Integrated Gradients + [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], + increasing + [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] + might reduce the error. + - For [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], + increasing + [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] + might reduce the error. + + See `this + introduction `__ + for more information. + output_name (str): + Output only. Name of the explain output. Specified as the + key in + [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. + """ + + baseline_output_value = proto.Field( + proto.DOUBLE, + number=1, + ) + instance_output_value = proto.Field( + proto.DOUBLE, + number=2, + ) + feature_attributions = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + output_index = proto.RepeatedField( + proto.INT32, + number=4, + ) + output_display_name = proto.Field( + proto.STRING, + number=5, + ) + approximation_error = proto.Field( + proto.DOUBLE, + number=6, + ) + output_name = proto.Field( + proto.STRING, + number=7, + ) + + +class Neighbor(proto.Message): + r"""Neighbors for example-based explanations. + + Attributes: + neighbor_id (str): + Output only. The neighbor id. + neighbor_distance (float): + Output only. The neighbor distance. + """ + + neighbor_id = proto.Field( + proto.STRING, + number=1, + ) + neighbor_distance = proto.Field( + proto.DOUBLE, + number=2, + ) + + +class ExplanationSpec(proto.Message): + r"""Specification of Model explanation. + + Attributes: + parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): + Required. Parameters that configure + explaining of the Model's predictions. + metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata): + Required. 
Metadata describing the Model's + input and output for explanation. + """ + + parameters = proto.Field( + proto.MESSAGE, + number=1, + message='ExplanationParameters', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message=explanation_metadata.ExplanationMetadata, + ) + + +class ExplanationParameters(proto.Message): + r"""Parameters to configure explaining for Model's predictions. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution): + An attribution method that approximates + Shapley values for features that contribute to + the label being predicted. A sampling strategy + is used to approximate the value rather than + considering all subsets of features. Refer to + this paper for model details: + https://arxiv.org/abs/1306.4265. + + This field is a member of `oneof`_ ``method``. + integrated_gradients_attribution (google.cloud.aiplatform_v1beta1.types.IntegratedGradientsAttribution): + An attribution method that computes + Aumann-Shapley values taking advantage of the + model's fully differentiable structure. Refer to + this paper for more details: + https://arxiv.org/abs/1703.01365 + + This field is a member of `oneof`_ ``method``. + xrai_attribution (google.cloud.aiplatform_v1beta1.types.XraiAttribution): + An attribution method that redistributes + Integrated Gradients attribution to segmented + regions, taking advantage of the model's fully + differentiable structure. Refer to this paper + for more details: + https://arxiv.org/abs/1906.02825 + XRAI currently performs better on natural + images, like a picture of a house or an animal. 
+ If the images are taken in artificial + environments, like a lab or manufacturing line, + or from diagnostic equipment, like x-rays or + quality-control cameras, use Integrated + Gradients instead. + + This field is a member of `oneof`_ ``method``. + examples (google.cloud.aiplatform_v1beta1.types.Examples): + Example-based explanations that return the + nearest neighbors from the provided dataset. + + This field is a member of `oneof`_ ``method``. + top_k (int): + If populated, returns attributions for top K + indices of outputs (defaults to 1). Only applies + to Models that predict more than one output + (e.g., multi-class Models). When set to -1, + returns explanations for all outputs. + output_indices (google.protobuf.struct_pb2.ListValue): + If populated, only returns attributions that have + [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + contained in output_indices. It must be an ndarray of + integers, with the same shape of the output it's explaining. + + If not populated, returns attributions for + [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] + indices of outputs. If neither top_k nor output_indices is + populated, returns the argmax index of the outputs. + + Only applicable to Models that predict multiple outputs + (e.g., multi-class Models that predict multiple classes). 
+ """ + + sampled_shapley_attribution = proto.Field( + proto.MESSAGE, + number=1, + oneof='method', + message='SampledShapleyAttribution', + ) + integrated_gradients_attribution = proto.Field( + proto.MESSAGE, + number=2, + oneof='method', + message='IntegratedGradientsAttribution', + ) + xrai_attribution = proto.Field( + proto.MESSAGE, + number=3, + oneof='method', + message='XraiAttribution', + ) + examples = proto.Field( + proto.MESSAGE, + number=7, + oneof='method', + message='Examples', + ) + top_k = proto.Field( + proto.INT32, + number=4, + ) + output_indices = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.ListValue, + ) + + +class SampledShapleyAttribution(proto.Message): + r"""An attribution method that approximates Shapley values for + features that contribute to the label being predicted. A + sampling strategy is used to approximate the value rather than + considering all subsets of features. + + Attributes: + path_count (int): + Required. The number of feature permutations to consider + when approximating the Shapley values. + + Valid range of its value is [1, 50], inclusively. + """ + + path_count = proto.Field( + proto.INT32, + number=1, + ) + + +class IntegratedGradientsAttribution(proto.Message): + r"""An attribution method that computes the Aumann-Shapley value + taking advantage of the model's fully differentiable structure. + Refer to this paper for more details: + https://arxiv.org/abs/1703.01365 + + Attributes: + step_count (int): + Required. The number of steps for approximating the path + integral. A good value to start is 50 and gradually increase + until the sum to diff property is within the desired error + range. + + Valid range of its value is [1, 100], inclusively. + smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): + Config for SmoothGrad approximation of + gradients. + When enabled, the gradients are approximated by + averaging the gradients from noisy samples in + the vicinity of the inputs. 
Adding noise can + help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for IG with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 + """ + + step_count = proto.Field( + proto.INT32, + number=1, + ) + smooth_grad_config = proto.Field( + proto.MESSAGE, + number=2, + message='SmoothGradConfig', + ) + blur_baseline_config = proto.Field( + proto.MESSAGE, + number=3, + message='BlurBaselineConfig', + ) + + +class XraiAttribution(proto.Message): + r"""An explanation method that redistributes Integrated Gradients + attributions to segmented regions, taking advantage of the + model's fully differentiable structure. Refer to this paper for + more details: https://arxiv.org/abs/1906.02825 + + Supported only by image Models. + + Attributes: + step_count (int): + Required. The number of steps for approximating the path + integral. A good value to start is 50 and gradually increase + until the sum to diff property is met within the desired + error range. + + Valid range of its value is [1, 100], inclusively. + smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): + Config for SmoothGrad approximation of + gradients. + When enabled, the gradients are approximated by + averaging the gradients from noisy samples in + the vicinity of the inputs. Adding noise can + help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for XRAI with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. 
+ Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 + """ + + step_count = proto.Field( + proto.INT32, + number=1, + ) + smooth_grad_config = proto.Field( + proto.MESSAGE, + number=2, + message='SmoothGradConfig', + ) + blur_baseline_config = proto.Field( + proto.MESSAGE, + number=3, + message='BlurBaselineConfig', + ) + + +class SmoothGradConfig(proto.Message): + r"""Config for SmoothGrad approximation of gradients. + When enabled, the gradients are approximated by averaging the + gradients from noisy samples in the vicinity of the inputs. + Adding noise can help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + noise_sigma (float): + This is a single float value and will be used to add noise + to all the features. Use this field when all features are + normalized to have the same distribution: scale to range [0, + 1], [-1, 1] or z-scoring, where features are normalized to + have 0-mean and 1-variance. Learn more about + `normalization `__. + + For best results the recommended value is about 10% - 20% of + the standard deviation of the input feature. Refer to + section 3.2 of the SmoothGrad paper: + https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. + + If the distribution is different per feature, set + [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] + instead for each feature. + + This field is a member of `oneof`_ ``GradientNoiseSigma``. 
+ feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma): + This is similar to + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], + but provides additional flexibility. A separate noise sigma + can be provided for each feature, which is useful if their + distributions are different. No noise is added to features + that are not set. If this field is unset, + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] + will be used for all features. + + This field is a member of `oneof`_ ``GradientNoiseSigma``. + noisy_sample_count (int): + The number of gradient samples to use for approximation. The + higher this number, the more accurate the gradient is, but + the runtime complexity increases by this factor as well. + Valid range of its value is [1, 50]. Defaults to 3. + """ + + noise_sigma = proto.Field( + proto.FLOAT, + number=1, + oneof='GradientNoiseSigma', + ) + feature_noise_sigma = proto.Field( + proto.MESSAGE, + number=2, + oneof='GradientNoiseSigma', + message='FeatureNoiseSigma', + ) + noisy_sample_count = proto.Field( + proto.INT32, + number=3, + ) + + +class FeatureNoiseSigma(proto.Message): + r"""Noise sigma by features. Noise sigma represents the standard + deviation of the gaussian kernel that will be used to add noise + to interpolated inputs prior to computing gradients. + + Attributes: + noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): + Noise sigma per feature. No noise is added to + features that are not set. + """ + + class NoiseSigmaForFeature(proto.Message): + r"""Noise sigma for a single feature. + + Attributes: + name (str): + The name of the input feature for which noise sigma is + provided. The features are defined in [explanation metadata + inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. 
+ sigma (float): + This represents the standard deviation of the Gaussian + kernel that will be used to add noise to the feature prior + to computing gradients. Similar to + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] + but represents the noise added to the current feature. + Defaults to 0.1. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + sigma = proto.Field( + proto.FLOAT, + number=2, + ) + + noise_sigma = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=NoiseSigmaForFeature, + ) + + +class BlurBaselineConfig(proto.Message): + r"""Config for blur baseline. + When enabled, a linear path from the maximally blurred image to + the input image is created. Using a blurred baseline instead of + zero (black image) is motivated by the BlurIG approach explained + here: + https://arxiv.org/abs/2004.03383 + + Attributes: + max_blur_sigma (float): + The standard deviation of the blur kernel for + the blurred baseline. The same blurring + parameter is used for both the height and the + width dimension. If not set, the method defaults + to the zero (i.e. black for images) baseline. + """ + + max_blur_sigma = proto.Field( + proto.FLOAT, + number=1, + ) + + +class Examples(proto.Message): + r"""Example-based explainability that returns the nearest + neighbors from the provided dataset. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + nearest_neighbor_search_config (google.protobuf.struct_pb2.Value): + The configuration for the generated index, the semantics are + the same as + [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] + and should match NearestNeighborSearchConfig. + + This field is a member of `oneof`_ ``config``. 
+ presets (google.cloud.aiplatform_v1beta1.types.Presets): + Preset config based on the desired query + speed-precision trade-off and modality + + This field is a member of `oneof`_ ``config``. + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Cloud Storage location for the input + instances. + neighbor_count (int): + The number of neighbors to return. + """ + + nearest_neighbor_search_config = proto.Field( + proto.MESSAGE, + number=2, + oneof='config', + message=struct_pb2.Value, + ) + presets = proto.Field( + proto.MESSAGE, + number=4, + oneof='config', + message='Presets', + ) + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message=io.GcsSource, + ) + neighbor_count = proto.Field( + proto.INT32, + number=3, + ) + + +class Presets(proto.Message): + r"""Preset configuration for example-based explanations + + Attributes: + query (google.cloud.aiplatform_v1beta1.types.Presets.Query): + Preset option controlling parameters for + query speed-precision trade-off + + This field is a member of `oneof`_ ``_query``. + modality (google.cloud.aiplatform_v1beta1.types.Presets.Modality): + Preset option controlling parameters for + different modalities + """ + class Query(proto.Enum): + r"""Preset option controlling parameters for query + speed-precision trade-off + """ + PRECISE = 0 + FAST = 1 + + class Modality(proto.Enum): + r"""Preset option controlling parameters for different modalities""" + MODALITY_UNSPECIFIED = 0 + IMAGE = 1 + TEXT = 2 + TABULAR = 3 + + query = proto.Field( + proto.ENUM, + number=1, + optional=True, + enum=Query, + ) + modality = proto.Field( + proto.ENUM, + number=2, + enum=Modality, + ) + + +class ExplanationSpecOverride(proto.Message): + r"""The + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + entries that can be overridden at [online + explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + time. 
+ + Attributes: + parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): + The parameters to be overridden. Note that the + [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] + cannot be changed. If not specified, no parameter is + overridden. + metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride): + The metadata to be overridden. If not + specified, no metadata is overridden. + examples_override (google.cloud.aiplatform_v1beta1.types.ExamplesOverride): + The example-based explanations parameter + overrides. + """ + + parameters = proto.Field( + proto.MESSAGE, + number=1, + message='ExplanationParameters', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='ExplanationMetadataOverride', + ) + examples_override = proto.Field( + proto.MESSAGE, + number=3, + message='ExamplesOverride', + ) + + +class ExplanationMetadataOverride(proto.Message): + r"""The + [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] + entries that can be overridden at [online + explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + time. + + Attributes: + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputMetadataOverride]): + Required. Overrides the [input + metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] + of the features. The key is the name of the feature to be + overridden. The keys specified here must exist in the input + metadata to be overridden. If a feature is not specified + here, the corresponding feature's input metadata is not + overridden. + """ + + class InputMetadataOverride(proto.Message): + r"""The [input + metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] + entries to be overridden. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. 
+ + This overrides the ``input_baseline`` field of the + [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] + object of the corresponding feature's input metadata. If + it's not specified, the original baselines are not + overridden. + """ + + input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=InputMetadataOverride, + ) + + +class ExamplesOverride(proto.Message): + r"""Overrides for example-based explanations. + + Attributes: + neighbor_count (int): + The number of neighbors to return. + crowding_count (int): + The number of neighbors to return that have + the same crowding tag. + restrictions (Sequence[google.cloud.aiplatform_v1beta1.types.ExamplesRestrictionsNamespace]): + Restrict the resulting nearest neighbors to + respect these constraints. + return_embeddings (bool): + If true, return the embeddings instead of + neighbors. + data_format (google.cloud.aiplatform_v1beta1.types.ExamplesOverride.DataFormat): + The format of the data being provided with + each call. + """ + class DataFormat(proto.Enum): + r"""Data format enum.""" + DATA_FORMAT_UNSPECIFIED = 0 + INSTANCES = 1 + EMBEDDINGS = 2 + + neighbor_count = proto.Field( + proto.INT32, + number=1, + ) + crowding_count = proto.Field( + proto.INT32, + number=2, + ) + restrictions = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='ExamplesRestrictionsNamespace', + ) + return_embeddings = proto.Field( + proto.BOOL, + number=4, + ) + data_format = proto.Field( + proto.ENUM, + number=5, + enum=DataFormat, + ) + + +class ExamplesRestrictionsNamespace(proto.Message): + r"""Restrictions namespace for example-based explanations + overrides. + + Attributes: + namespace_name (str): + The namespace name. + allow (Sequence[str]): + The list of allowed tags. + deny (Sequence[str]): + The list of deny tags. 
+ """ + + namespace_name = proto.Field( + proto.STRING, + number=1, + ) + allow = proto.RepeatedField( + proto.STRING, + number=2, + ) + deny = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py new file mode 100644 index 0000000000..7a0d106dad --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -0,0 +1,467 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ExplanationMetadata', + }, +) + + +class ExplanationMetadata(proto.Message): + r"""Metadata describing the Model's input and output for + explanation. + + Attributes: + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata]): + Required. Map from feature names to feature input metadata. + Keys are the name of the features. Values are the + specification of the feature. + + An empty InputMetadata is valid. 
It describes a text feature + which has the name specified as the key in + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + The baseline of the empty feature is chosen by Vertex AI. + + For Vertex AI-provided Tensorflow images, the key can be any + friendly name of the feature. Once specified, + [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + are keyed by this key (if not grouped with another feature). + + For custom images, the key must match with the key in + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. + outputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputMetadata]): + Required. Map from output names to output + metadata. + For Vertex AI-provided Tensorflow images, keys + can be any user defined string that consists of + any UTF-8 characters. + For custom images, keys are the name of the + output field in the prediction to be explained. + + Currently only one key is allowed. + feature_attributions_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing the format of the [feature + attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML tabular Models always have this field populated by + Vertex AI. Note: The URI given on output may be different, + including the URI scheme, than the one given on input. The + output URI will point to a location where the user only has + a read access. + latent_space_source (str): + Name of the source to generate embeddings for + example based explanations. + """ + + class InputMetadata(proto.Message): + r"""Metadata of the input of a feature. 
+ + Fields other than + [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] + are applicable only for Models that are using Vertex AI-provided + images for Tensorflow. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. + + If no baseline is specified, Vertex AI chooses the baseline + for this feature. If multiple baselines are specified, + Vertex AI returns the average attributions across them in + [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. + + For Vertex AI-provided Tensorflow images (both 1.x and 2.x), + the shape of each baseline must match the shape of the input + tensor. If a scalar is provided, we broadcast to the same + shape as the input tensor. + + For custom images, the element of the baselines must be in + the same format as the feature's input in the + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + input_tensor_name (str): + Name of the input tensor for this feature. + Required and is only applicable to Vertex + AI-provided images for Tensorflow. + encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding): + Defines how the feature is encoded into the + input tensor. Defaults to IDENTITY. + modality (str): + Modality of the feature. Valid values are: + numeric, image. Defaults to numeric. + feature_value_domain (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): + The domain details of the input feature + value. 
Like min/max, original mean or standard + deviation if normalized. + indices_tensor_name (str): + Specifies the index of the values of the input tensor. + Required when the input tensor is a sparse representation. + Refer to Tensorflow documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + dense_shape_tensor_name (str): + Specifies the shape of the values of the input if the input + is a sparse representation. Refer to Tensorflow + documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + index_feature_mapping (Sequence[str]): + A list of feature names for each index in the input tensor. + Required when the input + [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] + is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. + encoded_tensor_name (str): + Encoded tensor is a transformation of the input tensor. Must + be provided if choosing [Integrated Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution] + or [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] + and the input tensor is not differentiable. + + An encoded tensor is generated if the input tensor is + encoded by a lookup table. + encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): + A list of baselines for the encoded tensor. + The shape of each baseline should match the + shape of the encoded tensor. If a scalar is + provided, Vertex AI broadcasts to the same shape + as the encoded tensor. + visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization): + Visualization configurations for image + explanation. + group_name (str): + Name of the group that the input belongs to. Features with + the same group name will be treated as one feature when + computing attributions. 
Features grouped together can have + different shapes in value. If provided, there will be one + single attribution generated in + [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions], + keyed by the group name. + """ + class Encoding(proto.Enum): + r"""Defines how a feature is encoded. Defaults to IDENTITY.""" + ENCODING_UNSPECIFIED = 0 + IDENTITY = 1 + BAG_OF_FEATURES = 2 + BAG_OF_FEATURES_SPARSE = 3 + INDICATOR = 4 + COMBINED_EMBEDDING = 5 + CONCAT_EMBEDDING = 6 + + class FeatureValueDomain(proto.Message): + r"""Domain details of the input feature value. Provides numeric + information about the feature, such as its range (min, max). If the + feature has been pre-processed, for example with z-scoring, then it + provides information about how to recover the original feature. For + example, if the input feature is an image and it has been + pre-processed to obtain 0-mean and stddev = 1 values, then + original_mean, and original_stddev refer to the mean and stddev of + the original feature (e.g. image tensor) from which input feature + (with mean = 0 and stddev = 1) was obtained. + + Attributes: + min_value (float): + The minimum permissible value for this + feature. + max_value (float): + The maximum permissible value for this + feature. + original_mean (float): + If this input feature has been normalized to a mean value of + 0, the original_mean specifies the mean value of the domain + prior to normalization. + original_stddev (float): + If this input feature has been normalized to a standard + deviation of 1.0, the original_stddev specifies the standard + deviation of the domain prior to normalization. 
+ """ + + min_value = proto.Field( + proto.FLOAT, + number=1, + ) + max_value = proto.Field( + proto.FLOAT, + number=2, + ) + original_mean = proto.Field( + proto.FLOAT, + number=3, + ) + original_stddev = proto.Field( + proto.FLOAT, + number=4, + ) + + class Visualization(proto.Message): + r"""Visualization configurations for image explanation. + + Attributes: + type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): + Type of the image visualization. Only applicable to + [Integrated Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. + OUTLINES shows regions of attribution, while PIXELS shows + per-pixel attribution. Defaults to OUTLINES. + polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): + Whether to only highlight pixels with + positive contributions, negative or both. + Defaults to POSITIVE. + color_map (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): + The color scheme used for the highlighted areas. + + Defaults to PINK_GREEN for [Integrated Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], + which shows positive attributions in green and negative in + pink. + + Defaults to VIRIDIS for [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], + which highlights the most influential regions in yellow and + the least influential in blue. + clip_percent_upperbound (float): + Excludes attributions above the specified percentile from + the highlighted areas. Using the clip_percent_upperbound and + clip_percent_lowerbound together can be useful for filtering + out noise and making it easier to see areas of strong + attribution. Defaults to 99.9. 
+ clip_percent_lowerbound (float): + Excludes attributions below the specified + percentile, from the highlighted areas. Defaults + to 62. + overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): + How the original image is displayed in the + visualization. Adjusting the overlay can help + increase visual clarity if the original image + makes it difficult to view the visualization. + Defaults to NONE. + """ + class Type(proto.Enum): + r"""Type of the image visualization. Only applicable to [Integrated + Gradients + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. + """ + TYPE_UNSPECIFIED = 0 + PIXELS = 1 + OUTLINES = 2 + + class Polarity(proto.Enum): + r"""Whether to only highlight pixels with positive contributions, + negative or both. Defaults to POSITIVE. + """ + POLARITY_UNSPECIFIED = 0 + POSITIVE = 1 + NEGATIVE = 2 + BOTH = 3 + + class ColorMap(proto.Enum): + r"""The color scheme used for highlighting areas.""" + COLOR_MAP_UNSPECIFIED = 0 + PINK_GREEN = 1 + VIRIDIS = 2 + RED = 3 + GREEN = 4 + RED_GREEN = 6 + PINK_WHITE_GREEN = 5 + + class OverlayType(proto.Enum): + r"""How the original image is displayed in the visualization.""" + OVERLAY_TYPE_UNSPECIFIED = 0 + NONE = 1 + ORIGINAL = 2 + GRAYSCALE = 3 + MASK_BLACK = 4 + + type_ = proto.Field( + proto.ENUM, + number=1, + enum='ExplanationMetadata.InputMetadata.Visualization.Type', + ) + polarity = proto.Field( + proto.ENUM, + number=2, + enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', + ) + color_map = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', + ) + clip_percent_upperbound = proto.Field( + proto.FLOAT, + number=4, + ) + clip_percent_lowerbound = proto.Field( + proto.FLOAT, + number=5, + ) + overlay_type = proto.Field( + proto.ENUM, + number=6, + enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', + ) + + 
input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + input_tensor_name = proto.Field( + proto.STRING, + number=2, + ) + encoding = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Encoding', + ) + modality = proto.Field( + proto.STRING, + number=4, + ) + feature_value_domain = proto.Field( + proto.MESSAGE, + number=5, + message='ExplanationMetadata.InputMetadata.FeatureValueDomain', + ) + indices_tensor_name = proto.Field( + proto.STRING, + number=6, + ) + dense_shape_tensor_name = proto.Field( + proto.STRING, + number=7, + ) + index_feature_mapping = proto.RepeatedField( + proto.STRING, + number=8, + ) + encoded_tensor_name = proto.Field( + proto.STRING, + number=9, + ) + encoded_baselines = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=struct_pb2.Value, + ) + visualization = proto.Field( + proto.MESSAGE, + number=11, + message='ExplanationMetadata.InputMetadata.Visualization', + ) + group_name = proto.Field( + proto.STRING, + number=12, + ) + + class OutputMetadata(proto.Message): + r"""Metadata of the prediction output to be explained. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index_display_name_mapping (google.protobuf.struct_pb2.Value): + Static mapping between the index and display name. + + Use this if the outputs are a deterministic n-dimensional + array, e.g. a list of scores of all the classes in a + pre-defined order for a multi-classification Model. It's not + feasible if the outputs are non-deterministic, e.g. the + Model produces top-k classes or sort the outputs by their + values. + + The shape of the value must be an n-dimensional array of + strings. 
The number of dimensions must match that of the + outputs to be explained. The + [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] + is populated by locating in the mapping with + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + + This field is a member of `oneof`_ ``display_name_mapping``. + display_name_mapping_key (str): + Specify a field name in the prediction to look for the + display name. + + Use this if the prediction contains the display names for + the outputs. + + The display names in the prediction must have the same shape + of the outputs, so that it can be located by + [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + for a specific output. + + This field is a member of `oneof`_ ``display_name_mapping``. + output_tensor_name (str): + Name of the output tensor. Required and is + only applicable to Vertex AI provided images for + Tensorflow. + """ + + index_display_name_mapping = proto.Field( + proto.MESSAGE, + number=1, + oneof='display_name_mapping', + message=struct_pb2.Value, + ) + display_name_mapping_key = proto.Field( + proto.STRING, + number=2, + oneof='display_name_mapping', + ) + output_tensor_name = proto.Field( + proto.STRING, + number=3, + ) + + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=InputMetadata, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message=OutputMetadata, + ) + feature_attributions_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + latent_space_source = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py new file mode 100644 index 0000000000..58ee963e45 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Feature', + }, +) + + +class Feature(proto.Message): + r"""Feature Metadata information that describes an attribute of + an entity type. For example, apple is an entity type, and color + is a feature that describes apple. + + Attributes: + name (str): + Immutable. Name of the Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + The last part feature is assigned by the client. The feature + can be up to 64 characters long and can consist only of + ASCII Latin letters A-Z and a-z, underscore(_), and ASCII + digits 0-9 starting with a letter. The value will be unique + given an entity type. + description (str): + Description of the Feature. + value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType): + Required. Immutable. Type of Feature value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Timestamp when this EntityType + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this EntityType + was most recently updated. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your Features. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one Feature + (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): + Optional. Deprecated: The custom monitoring configuration + for this Feature, if not set, use the monitoring_config + defined for the EntityType this Feature belongs to. Only + Features with type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 can enable monitoring. + + If this is populated with + [FeaturestoreMonitoringConfig.disabled][] = true, snapshot + analysis monitoring is disabled; if + [FeaturestoreMonitoringConfig.monitoring_interval][] + specified, snapshot analysis monitoring is enabled. + Otherwise, snapshot analysis monitoring config is same as + the EntityType's this Feature belongs to. + disable_monitoring (bool): + Optional. If not set, use the monitoring_config defined for + the EntityType this Feature belongs to. Only Features with + type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL, STRING, DOUBLE or INT64 can enable monitoring. 
+ + If set to true, all types of data monitoring are disabled + despite the config on EntityType. + monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): + Output only. A list of historical [Snapshot + Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] + stats requested by user, sorted by + [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] + descending. + monitoring_stats_anomalies (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.MonitoringStatsAnomaly]): + Output only. The list of historical stats and + anomalies with specified objectives. + """ + class ValueType(proto.Enum): + r"""An enum representing the value type of a feature.""" + VALUE_TYPE_UNSPECIFIED = 0 + BOOL = 1 + BOOL_ARRAY = 2 + DOUBLE = 3 + DOUBLE_ARRAY = 4 + INT64 = 9 + INT64_ARRAY = 10 + STRING = 11 + STRING_ARRAY = 12 + BYTES = 13 + + class MonitoringStatsAnomaly(proto.Message): + r"""A list of historical [Snapshot + Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] or [Import + Feature Analysis] + [FeaturestoreMonitoringConfig.ImportFeatureAnalysis] stats requested + by user, sorted by + [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] + descending. + + Attributes: + objective (google.cloud.aiplatform_v1beta1.types.Feature.MonitoringStatsAnomaly.Objective): + Output only. The objective for each stats. + feature_stats_anomaly (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly): + Output only. The stats and anomalies + generated at specific timestamp. + """ + class Objective(proto.Enum): + r"""If the objective in the request is both + Import Feature Analysis and Snapshot Analysis, this objective + could be one of them. Otherwise, this objective should be the + same as the objective in the request. 
+ """ + OBJECTIVE_UNSPECIFIED = 0 + IMPORT_FEATURE_ANALYSIS = 1 + SNAPSHOT_ANALYSIS = 2 + + objective = proto.Field( + proto.ENUM, + number=1, + enum='Feature.MonitoringStatsAnomaly.Objective', + ) + feature_stats_anomaly = proto.Field( + proto.MESSAGE, + number=2, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + value_type = proto.Field( + proto.ENUM, + number=3, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + monitoring_config = proto.Field( + proto.MESSAGE, + number=9, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + disable_monitoring = proto.Field( + proto.BOOL, + number=12, + ) + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + monitoring_stats_anomalies = proto.RepeatedField( + proto.MESSAGE, + number=11, + message=MonitoringStatsAnomaly, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py new file mode 100644 index 0000000000..078509c041 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'FeatureStatsAnomaly', + }, +) + + +class FeatureStatsAnomaly(proto.Message): + r"""Stats and Anomaly generated at specific timestamp for specific + Feature. The start_time and end_time are used to define the time + range of the dataset that current stats belongs to, e.g. prediction + traffic is bucketed into prediction datasets by time window. If the + Dataset is not defined by time window, start_time = end_time. + Timestamp of the stats and anomalies always refers to end_time. Raw + stats and anomalies are stored in stats_uri or anomaly_uri in the + tensorflow defined protos. Field data_stats contains almost + identical information with the raw stats in Vertex AI defined proto, + for UI to display. + + Attributes: + score (float): + Feature importance score, only populated when cross-feature + monitoring is enabled. For now only used to represent + feature attribution score within range [0, 1] for + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] + and + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. + stats_uri (str): + Path of the stats file for current feature values in Cloud + Storage bucket. Format: + gs:////stats. 
Example: + gs://monitoring_bucket/feature_name/stats. Stats are stored + as binary format with Protobuf message + `tensorflow.metadata.v0.FeatureNameStatistics `__. + anomaly_uri (str): + Path of the anomaly file for current feature values in Cloud + Storage bucket. Format: + gs:////anomalies. Example: + gs://monitoring_bucket/feature_name/anomalies. Stats are + stored as binary format with Protobuf message. Anomalies are + stored as binary format with Protobuf message + [tensorflow.metadata.v0.AnomalyInfo] + (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). + distribution_deviation (float): + Deviation from the current stats to baseline + stats. 1. For categorical feature, the + distribution distance is calculated by + L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + anomaly_detection_threshold (float): + This is the threshold used when detecting anomalies. The + threshold can be changed by user, so this one might be + different from + [ThresholdConfig.value][google.cloud.aiplatform.v1beta1.ThresholdConfig.value]. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The start timestamp of window where stats were generated. + For objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), start_time is only used + to indicate the monitoring intervals, so it always equals to + (end_time - monitoring_interval). + end_time (google.protobuf.timestamp_pb2.Timestamp): + The end timestamp of window where stats were generated. For + objectives where time window doesn't make sense (e.g. + Featurestore Snapshot Monitoring), end_time indicates the + timestamp of the data used to generate stats (e.g. timestamp + we take snapshots for feature values).
+ """ + + score = proto.Field( + proto.DOUBLE, + number=1, + ) + stats_uri = proto.Field( + proto.STRING, + number=3, + ) + anomaly_uri = proto.Field( + proto.STRING, + number=4, + ) + distribution_deviation = proto.Field( + proto.DOUBLE, + number=5, + ) + anomaly_detection_threshold = proto.Field( + proto.DOUBLE, + number=9, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py new file mode 100644 index 0000000000..222cfab929 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'IdMatcher', + 'FeatureSelector', + }, +) + + +class IdMatcher(proto.Message): + r"""Matcher for Features of an EntityType by Feature ID. + + Attributes: + ids (Sequence[str]): + Required. 
The following are accepted as ``ids``: + + - A single-element list containing only ``*``, which + selects all Features in the target EntityType, or + - A list containing only Feature IDs, which selects only + Features with those IDs in the target EntityType. + """ + + ids = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class FeatureSelector(proto.Message): + r"""Selector for Features of an EntityType. + + Attributes: + id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher): + Required. Matches Features based on ID. + """ + + id_matcher = proto.Field( + proto.MESSAGE, + number=1, + message='IdMatcher', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py new file mode 100644 index 0000000000..68689a4a9f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Featurestore', + }, +) + + +class Featurestore(proto.Message): + r"""Vertex AI Feature Store provides a centralized repository for + organizing, storing, and serving ML features. The Featurestore + is a top-level container for your features and their values. + + Attributes: + name (str): + Output only. Name of the Featurestore. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Featurestore + was last updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + labels (Mapping[str, str]): + Optional. The labels with user-defined + metadata to organize your Featurestore. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one + Featurestore(System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig): + Optional. Config for online storage + resources. If unset, the featurestore will not + have an online store and cannot be used for + online serving. + state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): + Output only. 
State of the featurestore. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Optional. Customer-managed encryption key + spec for data storage. If set, both of the + online and offline data storage will be secured + by this key. + """ + class State(proto.Enum): + r"""Possible states a featurestore can have.""" + STATE_UNSPECIFIED = 0 + STABLE = 1 + UPDATING = 2 + + class OnlineServingConfig(proto.Message): + r"""OnlineServingConfig specifies the details for provisioning + online serving resources. + + Attributes: + fixed_node_count (int): + The number of nodes for the online store. The + number of nodes doesn't scale automatically, but + you can manually update the number of nodes. If + set to 0, the featurestore will not have an + online store and cannot be used for online + serving. + scaling (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig.Scaling): + Online serving scaling configuration. Only one of + ``fixed_node_count`` and ``scaling`` can be set. Setting one + will reset the other. + """ + + class Scaling(proto.Message): + r"""Online serving scaling configuration. If min_node_count and + max_node_count are set to the same value, the cluster will be + configured with the fixed number of node (no auto-scaling). + + Attributes: + min_node_count (int): + Required. The minimum number of nodes to + scale down to. Must be greater than or equal to + 1. + max_node_count (int): + The maximum number of nodes to scale up to. Must be greater + than min_node_count, and less than or equal to 10 times of + 'min_node_count'. 
+ """ + + min_node_count = proto.Field( + proto.INT32, + number=1, + ) + max_node_count = proto.Field( + proto.INT32, + number=2, + ) + + fixed_node_count = proto.Field( + proto.INT32, + number=2, + ) + scaling = proto.Field( + proto.MESSAGE, + number=4, + message='Featurestore.OnlineServingConfig.Scaling', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + online_serving_config = proto.Field( + proto.MESSAGE, + number=7, + message=OnlineServingConfig, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=State, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=10, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py new file mode 100644 index 0000000000..d70e06b901 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'FeaturestoreMonitoringConfig', + }, +) + + +class FeaturestoreMonitoringConfig(proto.Message): + r"""Configuration of how features in Featurestore are monitored. + + Attributes: + snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): + The config for Snapshot Analysis Based + Feature Monitoring. + import_features_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis): + The config for ImportFeatures Analysis Based + Feature Monitoring. + numerical_threshold_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.ThresholdConfig): + Threshold for numerical features of anomaly detection. This + is shared by all objectives of Featurestore Monitoring for + numerical features (i.e. Features with type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + DOUBLE or INT64). + categorical_threshold_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.ThresholdConfig): + Threshold for categorical features of anomaly detection. + This is shared by all types of Featurestore Monitoring for + categorical features (i.e. Features with type + ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) + BOOL or STRING). + """ + + class SnapshotAnalysis(proto.Message): + r"""Configuration of the Featurestore's Snapshot Analysis Based + Monitoring. This type of analysis generates statistics for each + Feature based on a snapshot of the latest feature value of each + entities every monitoring_interval. + + Attributes: + disabled (bool): + The monitoring schedule for snapshot analysis. 
For + EntityType-level config: unset / disabled = true indicates + disabled by default for Features under it; otherwise by + default enable snapshot analysis monitoring with + monitoring_interval for Features under it. Feature-level + config: disabled = true indicates disabled regardless of the + EntityType-level config; unset monitoring_interval indicates + going with EntityType-level config; otherwise run snapshot + analysis monitoring with monitoring_interval regardless of + the EntityType-level config. Explicitly Disable the snapshot + analysis based monitoring. + monitoring_interval (google.protobuf.duration_pb2.Duration): + Configuration of the snapshot analysis based + monitoring pipeline running interval. The value + is rolled up to full day. + monitoring_interval_days (int): + Configuration of the snapshot analysis based monitoring + pipeline running interval. The value indicates number of + days. If both + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] + and + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval] + are set when creating/updating EntityTypes/Features, + [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] + will be used. + staleness_days (int): + Customized export features time window for + snapshot analysis. Unit is one day. Default + value is 3 weeks. Minimum value is 1 day. + Maximum value is 4000 days. 
+ """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + monitoring_interval = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + monitoring_interval_days = proto.Field( + proto.INT32, + number=3, + ) + staleness_days = proto.Field( + proto.INT32, + number=4, + ) + + class ImportFeaturesAnalysis(proto.Message): + r"""Configuration of the Featurestore's ImportFeature Analysis Based + Monitoring. This type of analysis generates statistics for values of + each Feature imported by every [ImportFeatureValues][] operation. + + Attributes: + state (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.State): + Whether to enable / disable / inherite + default hebavior for import features analysis. + anomaly_detection_baseline (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.Baseline): + The baseline used to do anomaly detection for + the statistics generated by import features + analysis. + """ + class State(proto.Enum): + r"""The state defines whether to enable ImportFeature analysis.""" + STATE_UNSPECIFIED = 0 + DEFAULT = 1 + ENABLED = 2 + DISABLED = 3 + + class Baseline(proto.Enum): + r"""Defines the baseline to do anomaly detection for feature values + imported by each [ImportFeatureValues][] operation. + """ + BASELINE_UNSPECIFIED = 0 + LATEST_STATS = 1 + MOST_RECENT_SNAPSHOT_STATS = 2 + PREVIOUS_IMPORT_FEATURES_STATS = 3 + + state = proto.Field( + proto.ENUM, + number=1, + enum='FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.State', + ) + anomaly_detection_baseline = proto.Field( + proto.ENUM, + number=2, + enum='FeaturestoreMonitoringConfig.ImportFeaturesAnalysis.Baseline', + ) + + class ThresholdConfig(proto.Message): + r"""The config for Featurestore Monitoring threshold. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (float): + Specify a threshold value that can trigger + the alert. 1. For categorical feature, the + distribution distance is calculated by + L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. Each feature must have a non-zero + threshold if they need to be monitored. + Otherwise no alert will be triggered for that + feature. + + This field is a member of `oneof`_ ``threshold``. + """ + + value = proto.Field( + proto.DOUBLE, + number=1, + oneof='threshold', + ) + + snapshot_analysis = proto.Field( + proto.MESSAGE, + number=1, + message=SnapshotAnalysis, + ) + import_features_analysis = proto.Field( + proto.MESSAGE, + number=2, + message=ImportFeaturesAnalysis, + ) + numerical_threshold_config = proto.Field( + proto.MESSAGE, + number=3, + message=ThresholdConfig, + ) + categorical_threshold_config = proto.Field( + proto.MESSAGE, + number=4, + message=ThresholdConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py new file mode 100644 index 0000000000..be95e722da --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1beta1.types import types +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'FeatureValue', + 'FeatureValueList', + }, +) + + +class ReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + entity_type (str): + Required. The resource name of the EntityType for the entity + being read. Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. + entity_id (str): + Required. ID for a specific entity. For example, for a + machine learning model predicting user clicks on a website, + an entity ID could be ``user_123``. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. 
+ """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id = proto.Field( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class ReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + header (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.Header): + Response header. + entity_view (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView): + Entity view with Feature values. This may be + the entity in the Featurestore if values for all + Features were requested, or a projection of the + entity in the Featurestore if values for only + some Features were requested. + """ + + class FeatureDescriptor(proto.Message): + r"""Metadata for requested Features. + + Attributes: + id (str): + Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + + class Header(proto.Message): + r"""Response header with metadata for the requested + [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] + and Features. + + Attributes: + entity_type (str): + The resource name of the EntityType from the + [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. + Value format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): + List of Feature metadata corresponding to each piece of + [ReadFeatureValuesResponse.data][]. 
+ """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + feature_descriptors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.FeatureDescriptor', + ) + + class EntityView(proto.Message): + r"""Entity view with Feature values. + + Attributes: + entity_id (str): + ID of the requested entity. + data (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView.Data]): + Each piece of data holds the k requested values for one + requested Feature. If no values for the requested Feature + exist, the corresponding cell will be empty. This has the + same size and is in the same order as the features from the + header + [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header]. + """ + + class Data(proto.Message): + r"""Container to hold value(s), successive in time, for one + Feature from the request. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (google.cloud.aiplatform_v1beta1.types.FeatureValue): + Feature value if a single value is requested. + + This field is a member of `oneof`_ ``data``. + values (google.cloud.aiplatform_v1beta1.types.FeatureValueList): + Feature values list if values, successive in + time, are requested. If the requested number of + values is greater than the number of existing + Feature values, nonexistent values are omitted + instead of being returned as empty. + + This field is a member of `oneof`_ ``data``. 
+ """ + + value = proto.Field( + proto.MESSAGE, + number=1, + oneof='data', + message='FeatureValue', + ) + values = proto.Field( + proto.MESSAGE, + number=2, + oneof='data', + message='FeatureValueList', + ) + + entity_id = proto.Field( + proto.STRING, + number=1, + ) + data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.EntityView.Data', + ) + + header = proto.Field( + proto.MESSAGE, + number=1, + message=Header, + ) + entity_view = proto.Field( + proto.MESSAGE, + number=2, + message=EntityView, + ) + + +class StreamingReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + + Attributes: + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be ``user``. + entity_ids (Sequence[str]): + Required. IDs of entities to read Feature values of. The + maximum number of IDs is 100. For example, for a machine + learning model predicting user clicks on a website, an + entity ID could be ``user_123``. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. Feature IDs will be + deduplicated. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class FeatureValue(proto.Message): + r"""Value for a feature. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bool_value (bool): + Bool type feature value. + + This field is a member of `oneof`_ ``value``. + double_value (float): + Double type feature value. + + This field is a member of `oneof`_ ``value``. + int64_value (int): + Int64 feature value. + + This field is a member of `oneof`_ ``value``. + string_value (str): + String feature value. + + This field is a member of `oneof`_ ``value``. + bool_array_value (google.cloud.aiplatform_v1beta1.types.BoolArray): + A list of bool type feature value. + + This field is a member of `oneof`_ ``value``. + double_array_value (google.cloud.aiplatform_v1beta1.types.DoubleArray): + A list of double type feature value. + + This field is a member of `oneof`_ ``value``. + int64_array_value (google.cloud.aiplatform_v1beta1.types.Int64Array): + A list of int64 type feature value. + + This field is a member of `oneof`_ ``value``. + string_array_value (google.cloud.aiplatform_v1beta1.types.StringArray): + A list of string type feature value. + + This field is a member of `oneof`_ ``value``. + bytes_value (bytes): + Bytes feature value. + + This field is a member of `oneof`_ ``value``. + metadata (google.cloud.aiplatform_v1beta1.types.FeatureValue.Metadata): + Metadata of feature value. + """ + + class Metadata(proto.Message): + r"""Metadata of feature value. + + Attributes: + generate_time (google.protobuf.timestamp_pb2.Timestamp): + Feature generation timestamp. Typically, it + is provided by user at feature ingestion time. + If not, feature store will use the system + timestamp when the data is ingested into feature + store. For streaming ingestion, the time, + aligned by days, must be no older than five + years (1825 days) and no later than one year + (366 days) in the future. 
+ """ + + generate_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + + bool_value = proto.Field( + proto.BOOL, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + int64_value = proto.Field( + proto.INT64, + number=5, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=6, + oneof='value', + ) + bool_array_value = proto.Field( + proto.MESSAGE, + number=7, + oneof='value', + message=types.BoolArray, + ) + double_array_value = proto.Field( + proto.MESSAGE, + number=8, + oneof='value', + message=types.DoubleArray, + ) + int64_array_value = proto.Field( + proto.MESSAGE, + number=11, + oneof='value', + message=types.Int64Array, + ) + string_array_value = proto.Field( + proto.MESSAGE, + number=12, + oneof='value', + message=types.StringArray, + ) + bytes_value = proto.Field( + proto.BYTES, + number=13, + oneof='value', + ) + metadata = proto.Field( + proto.MESSAGE, + number=14, + message=Metadata, + ) + + +class FeatureValueList(proto.Message): + r"""Container for list of values. + + Attributes: + values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): + A list of feature values. All of them should + be the same data type. + """ + + values = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='FeatureValue', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py new file mode 100644 index 0000000000..cb874a9c05 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -0,0 +1,1665 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateFeaturestoreRequest', + 'GetFeaturestoreRequest', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'UpdateFeaturestoreRequest', + 'DeleteFeaturestoreRequest', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'BatchReadFeatureValuesRequest', + 'ExportFeatureValuesRequest', + 'DestinationFeatureSetting', + 'FeatureValueDestination', + 'ExportFeatureValuesResponse', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeRequest', + 'GetEntityTypeRequest', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'UpdateEntityTypeRequest', + 'DeleteEntityTypeRequest', + 'CreateFeatureRequest', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'GetFeatureRequest', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateFeatureRequest', + 'DeleteFeatureRequest', + 
'CreateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreOperationMetadata', + 'ImportFeatureValuesOperationMetadata', + 'ExportFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesOperationMetadata', + 'CreateEntityTypeOperationMetadata', + 'CreateFeatureOperationMetadata', + 'BatchCreateFeaturesOperationMetadata', + }, +) + + +class CreateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + + Attributes: + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}'`` + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore to create. + featurestore_id (str): + Required. The ID to use for this Featurestore, which will + become the final component of the Featurestore's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within the project and location. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + featurestore = proto.Field( + proto.MESSAGE, + number=2, + message=gca_featurestore.Featurestore, + ) + featurestore_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore + resource. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListFeaturestoresRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Featurestores. 
Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the featurestores that match the filter expression. + The following fields are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``online_serving_config.fixed_node_count``: Supports + ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` + comparisons. + - ``labels``: Supports key-value equality and key presence. + + Examples: + + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" + set to "prod". + page_size (int): + The maximum number of Featurestores to + return. The service may return fewer than this + value. If unspecified, at most 100 Featurestores + will be returned. The maximum value is 100; any + value greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported Fields: + + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListFeaturestoresResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Attributes: + featurestores (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore]): + The Featurestores matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + featurestores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + + Attributes: + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Featurestore resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. 
If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + """ + + featurestore = proto.Field( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + force (bool): + If set to true, any EntityTypes and Features + for this Featurestore will also be deleted. + (Otherwise, the request will only work if the + Featurestore has no EntityTypes.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class ImportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + avro_source (google.cloud.aiplatform_v1beta1.types.AvroSource): + + This field is a member of `oneof`_ ``source``. + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + + This field is a member of `oneof`_ ``source``. 
+ csv_source (google.cloud.aiplatform_v1beta1.types.CsvSource): + + This field is a member of `oneof`_ ``source``. + feature_time_field (str): + Source column that holds the Feature + timestamp for all Feature values in each entity. + + This field is a member of `oneof`_ ``feature_time_source``. + feature_time (google.protobuf.timestamp_pb2.Timestamp): + Single Feature timestamp for all entities + being imported. The timestamp must not have + higher than millisecond precision. + + This field is a member of `oneof`_ ``feature_time_source``. + entity_type (str): + Required. The resource name of the EntityType grouping the + Features for which values are being imported. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + entity_id_field (str): + Source column that holds entity IDs. If not provided, entity + IDs are extracted from the column named ``entity_id``. + feature_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest.FeatureSpec]): + Required. Specifications defining which Feature values to + import from the entity. The request fails if no + feature_specs are provided, and having multiple + feature_specs for one Feature is not allowed. + disable_online_serving (bool): + If set, data will not be imported for online + serving. This is typically used for backfilling, + where Feature generation timestamps are not in + the timestamp range needed for online serving. + worker_count (int): + Specifies the number of workers that are used + to write data to the Featurestore. Consider the + online serving capacity that you require to + achieve the desired import throughput without + interfering with online serving. The value must + be positive, and less than or equal to 100. If + not set, defaults to using 1 worker. The low + count ensures minimal impact on online serving + performance. + disable_ingestion_analysis (bool): + If true, API doesn't start ingestion analysis + pipeline. 
+ """ + + class FeatureSpec(proto.Message): + r"""Defines the Feature value(s) to import. + + Attributes: + id (str): + Required. ID of the Feature to import values + of. This Feature must exist in the target + EntityType, or the request will fail. + source_field (str): + Source column to get the Feature values from. + If not set, uses the column with the same name + as the Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + source_field = proto.Field( + proto.STRING, + number=2, + ) + + avro_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.AvroSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + csv_source = proto.Field( + proto.MESSAGE, + number=4, + oneof='source', + message=io.CsvSource, + ) + feature_time_field = proto.Field( + proto.STRING, + number=6, + oneof='feature_time_source', + ) + feature_time = proto.Field( + proto.MESSAGE, + number=7, + oneof='feature_time_source', + message=timestamp_pb2.Timestamp, + ) + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id_field = proto.Field( + proto.STRING, + number=5, + ) + feature_specs = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=FeatureSpec, + ) + disable_online_serving = proto.Field( + proto.BOOL, + number=9, + ) + worker_count = proto.Field( + proto.INT32, + number=11, + ) + disable_ingestion_analysis = proto.Field( + proto.BOOL, + number=12, + ) + + +class ImportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. 
+ invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). + """ + + imported_entity_count = proto.Field( + proto.INT64, + number=1, + ) + imported_feature_value_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_row_count = proto.Field( + proto.INT64, + number=6, + ) + + +class BatchReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): + Each read instance consists of exactly one read timestamp + and one or more entity IDs identifying entities of the + corresponding EntityTypes whose Features are requested. + + Each output instance contains Feature values of requested + entities concatenated together as of the read time. + + An example read instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. + + An example output instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. + + Timestamp in each read instance must be millisecond-aligned. + + ``csv_read_instances`` are read instances stored in a + plain-text CSV file. The header should be: + [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp + + The columns can be in any order. + + Values in the timestamp column must use the RFC 3339 format, + e.g. ``2012-07-30T10:43:17.123Z``. 
+ + This field is a member of `oneof`_ ``read_option``. + bigquery_read_instances (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + Similar to csv_read_instances, but from BigQuery source. + + This field is a member of `oneof`_ ``read_option``. + featurestore (str): + Required. The resource name of the Featurestore from which + to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): + Required. Specifies output location and + format. + pass_through_fields (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.PassThroughField]): + When not empty, the specified fields in the + \*_read_instances source will be joined as-is in the output, + in addition to those fields from the Featurestore Entity. + + For BigQuery source, the type of the pass-through values + will be automatically inferred. For CSV source, the + pass-through values will be passed as opaque bytes. + entity_type_specs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): + Required. Specifies EntityType grouping Features to read + values of and settings. Each EntityType referenced in + [BatchReadFeatureValuesRequest.entity_type_specs] must have + a column specifying entity IDs in the EntityType in + [BatchReadFeatureValuesRequest.request][] . + """ + + class PassThroughField(proto.Message): + r"""Describe pass-through fields in read_instance source. + + Attributes: + field_name (str): + Required. The name of the field in the CSV header or the + name of the column in BigQuery table. The naming restriction + is the same as + [Feature.name][google.cloud.aiplatform.v1beta1.Feature.name]. + """ + + field_name = proto.Field( + proto.STRING, + number=1, + ) + + class EntityTypeSpec(proto.Message): + r"""Selects Features of an EntityType to read values of and + specifies read settings. 
+ + Attributes: + entity_type_id (str): + Required. ID of the EntityType to select Features. The + EntityType id is the + [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id] + specified during EntityType creation. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selectors choosing which Feature + values to read from the EntityType. + settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): + Per-Feature settings for the batch read. + """ + + entity_type_id = proto.Field( + proto.STRING, + number=1, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=2, + message=gca_feature_selector.FeatureSelector, + ) + settings = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='DestinationFeatureSetting', + ) + + csv_read_instances = proto.Field( + proto.MESSAGE, + number=3, + oneof='read_option', + message=io.CsvSource, + ) + bigquery_read_instances = proto.Field( + proto.MESSAGE, + number=5, + oneof='read_option', + message=io.BigQuerySource, + ) + featurestore = proto.Field( + proto.STRING, + number=1, + ) + destination = proto.Field( + proto.MESSAGE, + number=4, + message='FeatureValueDestination', + ) + pass_through_fields = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=PassThroughField, + ) + entity_type_specs = proto.RepeatedField( + proto.MESSAGE, + number=7, + message=EntityTypeSpec, + ) + + +class ExportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): + Exports the latest Feature values of all + entities of the EntityType within a time range. + + This field is a member of `oneof`_ ``mode``. + full_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.FullExport): + Exports all historical values of all entities + of the EntityType within a time range + + This field is a member of `oneof`_ ``mode``. + entity_type (str): + Required. The resource name of the EntityType from which to + export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): + Required. Specifies destination location and + format. + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selects Features to export values + of. + settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): + Per-Feature export settings. + """ + + class SnapshotExport(proto.Message): + r"""Describes exporting the latest Feature values of all entities of the + EntityType between [start_time, snapshot_time]. + + Attributes: + snapshot_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. 
+ """ + + snapshot_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + class FullExport(proto.Message): + r"""Describes exporting all historical Feature values of all entities of + the EntityType between [start_time, end_time]. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + + snapshot_export = proto.Field( + proto.MESSAGE, + number=3, + oneof='mode', + message=SnapshotExport, + ) + full_export = proto.Field( + proto.MESSAGE, + number=7, + oneof='mode', + message=FullExport, + ) + entity_type = proto.Field( + proto.STRING, + number=1, + ) + destination = proto.Field( + proto.MESSAGE, + number=4, + message='FeatureValueDestination', + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=5, + message=gca_feature_selector.FeatureSelector, + ) + settings = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='DestinationFeatureSetting', + ) + + +class DestinationFeatureSetting(proto.Message): + r""" + + Attributes: + feature_id (str): + Required. The ID of the Feature to apply the + setting to. + destination_field (str): + Specify the field name in the export + destination. If not specified, Feature ID is + used. 
+ """ + + feature_id = proto.Field( + proto.STRING, + number=1, + ) + destination_field = proto.Field( + proto.STRING, + number=2, + ) + + +class FeatureValueDestination(proto.Message): + r"""A destination location for Feature values and format. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + Output in BigQuery format. + [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri] + in + [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination] + must refer to a table. + + This field is a member of `oneof`_ ``destination``. + tfrecord_destination (google.cloud.aiplatform_v1beta1.types.TFRecordDestination): + Output in TFRecord format. + + Below are the mapping from Feature value type in + Featurestore to Feature value type in TFRecord: + + :: + + Value type in Featurestore | Value type in TFRecord + DOUBLE, DOUBLE_ARRAY | FLOAT_LIST + INT64, INT64_ARRAY | INT64_LIST + STRING, STRING_ARRAY, BYTES | BYTES_LIST + true -> byte_string("true"), false -> byte_string("false") + BOOL, BOOL_ARRAY (true, false) | BYTES_LIST + + This field is a member of `oneof`_ ``destination``. + csv_destination (google.cloud.aiplatform_v1beta1.types.CsvDestination): + Output in CSV format. Array Feature value + types are not allowed in CSV format. + + This field is a member of `oneof`_ ``destination``. 
+ """ + + bigquery_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.BigQueryDestination, + ) + tfrecord_destination = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.TFRecordDestination, + ) + csv_destination = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.CsvDestination, + ) + + +class ExportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + + +class BatchReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + + +class CreateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to create + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + The EntityType to create. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within a featurestore. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + entity_type = proto.Field( + proto.MESSAGE, + number=2, + message=gca_entity_type.EntityType, + ) + entity_type_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + + Attributes: + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEntityTypesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + filter (str): + Lists the EntityTypes that match the filter expression. The + following filters are supported: + + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``>=``, and ``<=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. + page_size (int): + The maximum number of EntityTypes to return. + The service may return fewer than this value. 
If + unspecified, at most 1000 EntityTypes will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. + + Supported fields: + + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListEntityTypesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Attributes: + entity_types (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType]): + The EntityTypes matching the request. + next_page_token (str): + A token, which can be sent as + [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + entity_types = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_entity_type.EntityType, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + + Attributes: + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the EntityType resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. 
+ + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + """ + + entity_type = proto.Field( + proto.MESSAGE, + number=1, + message=gca_entity_type.EntityType, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEntityTypeRequest(proto.Message): + r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. + + Attributes: + name (str): + Required. The name of the EntityType to be deleted. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + force (bool): + If set to true, any Features for this + EntityType will also be deleted. (Otherwise, the + request will only work if the EntityType has no + Features.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class CreateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create a + Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature to create. + feature_id (str): + Required. The ID to use for the Feature, which will become + the final component of the Feature's resource name. 
+ + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within an EntityType. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + feature = proto.Field( + proto.MESSAGE, + number=2, + message=gca_feature.Feature, + ) + feature_id = proto.Field( + proto.STRING, + number=3, + ) + + +class BatchCreateFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + parent (str): + Required. The resource name of the EntityType to create the + batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same parent + EntityType. The ``parent`` field in each child request + message can be omitted. If ``parent`` is set in a child + request, then the value must match the ``parent`` value in + this request message. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateFeatureRequest', + ) + + +class BatchCreateFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features created. + """ + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + + +class GetFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. 
+ + Attributes: + name (str): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Features. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + filter (str): + Lists the Features that match the filter expression. The + following filters are supported: + + - ``value_type``: Supports = and != comparisons. + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + 'env' as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 1000 Features will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other parameters provided to + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``feature_id`` + - ``value_type`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + latest_stats_count (int): + If set, return the most recent + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count] + of stats for each Feature in response. Valid value is [0, + 10]. If number of stats exists < + [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count], + return all existing stats. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + latest_stats_count = proto.Field( + proto.INT32, + number=7, + ) + + +class ListFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class SearchFeaturesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Attributes: + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + query (str): + Query string that is a conjunction of field-restricted + queries and/or field-restricted filters. Field-restricted + queries and filters can be combined using ``AND`` to form a + conjunction. + + A field query is in the form FIELD:QUERY. This implicitly + checks if QUERY exists as a substring within Feature's + FIELD. The QUERY and the FIELD are converted to a sequence + of words (i.e. tokens) for comparison. This is done by: + + - Removing leading/trailing whitespace and tokenizing the + search value. Characters that are not one of alphanumeric + ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are + treated as delimiters for tokens. ``*`` is treated as a + wildcard that matches characters within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. + + A QUERY must be either a singular token or a phrase. A + phrase is one or multiple words enclosed in double quotation + marks ("). With phrases, the order of the words is + important. Words in the phrase must be matching in order and + consecutively. + + Supported FIELDs for field-restricted queries: + + - ``feature_id`` + - ``description`` + - ``entity_type_id`` + + Examples: + + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). 
+ - ``feature_id: foo*feature`` --> Matches a Feature with ID + containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. + + Besides field queries, the following exact-match filters are + supported. The exact-match filters do not support wildcards. + Unlike field-restricted queries, exact-match filters are + case-sensitive. + + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. + + Examples: + + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label with + ``env`` as the key. + page_size (int): + The maximum number of Features to return. The + service may return fewer than this value. If + unspecified, at most 100 Features will be + returned. The maximum value is 100; any value + greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures], + except ``page_size``, must match the call that provided the + page token. 
+ """ + + location = proto.Field( + proto.STRING, + number=1, + ) + query = proto.Field( + proto.STRING, + number=3, + ) + page_size = proto.Field( + proto.INT32, + number=4, + ) + page_token = proto.Field( + proto.STRING, + number=5, + ) + + +class SearchFeaturesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Attributes: + features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): + The Features matching the request. + + Fields returned: + + - ``name`` + - ``description`` + - ``labels`` + - ``create_time`` + - ``update_time`` + next_page_token (str): + A token, which can be sent as + [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + features = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + + Attributes: + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature's ``name`` field is used to identify + the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Features resource by the update. The fields specified + in the update_mask are relative to the resource, not the + full request. A field will be overwritten if it is in the + mask. 
If the user does not provide a mask then only the + non-empty fields present in the request will be overwritten. + Set the update_mask to ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``disable_monitoring`` + """ + + feature = proto.Field( + proto.MESSAGE, + number=1, + message=gca_feature.Feature, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeatureRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + + Attributes: + name (str): + Required. The name of the Features to be deleted. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform create Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateFeaturestoreOperationMetadata(proto.Message): + r"""Details of operations that perform update Featurestore. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ImportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that perform import Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore import + Feature values. 
+ imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. + invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + imported_entity_count = proto.Field( + proto.INT64, + number=2, + ) + imported_feature_value_count = proto.Field( + proto.INT64, + number=3, + ) + invalid_row_count = proto.Field( + proto.INT64, + number=6, + ) + + +class ExportFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that exports Features values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore export + Feature values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class BatchReadFeatureValuesOperationMetadata(proto.Message): + r"""Details of operations that batch reads Feature values. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Featurestore batch + read Features values. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateEntityTypeOperationMetadata(proto.Message): + r"""Details of operations that perform create EntityType. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for EntityType. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateFeatureOperationMetadata(proto.Message): + r"""Details of operations that perform create Feature. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class BatchCreateFeaturesOperationMetadata(proto.Message): + r"""Details of operations that perform batch create Features. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Feature. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py new file mode 100644 index 0000000000..2c949e71a2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import study +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'HyperparameterTuningJob', + }, +) + + +class HyperparameterTuningJob(proto.Message): + r"""Represents a HyperparameterTuningJob. A + HyperparameterTuningJob has a Study specification and multiple + CustomJobs with identical CustomJob specification. + + Attributes: + name (str): + Output only. Resource name of the + HyperparameterTuningJob. + display_name (str): + Required. The display name of the + HyperparameterTuningJob. The name can be up to + 128 characters long and can be consist of any + UTF-8 characters. + study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): + Required. Study configuration of the + HyperparameterTuningJob. + max_trial_count (int): + Required. The desired total number of Trials. + parallel_trial_count (int): + Required. The desired number of Trials to run + in parallel. + max_failed_trial_count (int): + The number of failed Trials that need to be + seen before failing the HyperparameterTuningJob. + If set to 0, Vertex AI decides how many Trials + must fail before the whole job fails. + trial_job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): + Required. The spec of a trial job. The same + spec applies to the CustomJobs created in all + the trials. + trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): + Output only. Trials of the + HyperparameterTuningJob. + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the job. 
+ create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + HyperparameterTuningJob was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the HyperparameterTuningJob for the + first time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the HyperparameterTuningJob entered + any of the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + HyperparameterTuningJob was most recently + updated. + error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize HyperparameterTuningJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key options for a + HyperparameterTuningJob. If this is set, then + all resources created by the + HyperparameterTuningJob will be encrypted with + the provided encryption key. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=4, + message=study.StudySpec, + ) + max_trial_count = proto.Field( + proto.INT32, + number=5, + ) + parallel_trial_count = proto.Field( + proto.INT32, + number=6, + ) + max_failed_trial_count = proto.Field( + proto.INT32, + number=7, + ) + trial_job_spec = proto.Field( + proto.MESSAGE, + number=8, + message=custom_job.CustomJobSpec, + ) + trials = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=study.Trial, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=15, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=17, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py new file mode 100644 index 0000000000..cc9aba51e7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Index', + }, +) + + +class Index(proto.Message): + r"""A representation of a collection of database items organized + in a way that allows for approximate nearest neighbor (a.k.a + ANN) algorithms search. + + Attributes: + name (str): + Output only. The resource name of the Index. + display_name (str): + Required. The display name of the Index. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Index. + metadata_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing additional information about the Index, + that is specific to it. Unset if the Index does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + An additional information about the Index; the schema of the + metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri]. 
+ deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]): + Output only. The pointers to DeployedIndexes + created from this Index. An Index can be only + deleted if all its DeployedIndexes had been + undeployed first. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Indexes. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Index was most recently + updated. This also includes any update to the contents of + the Index. Note that Operations working on this Index may + have their + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] + a little after the value of this timestamp, yet that does + not mean their results are not already reflected in the + Index. Result of any successfully completed Operation on the + Index is reflected in it. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, + number=7, + message=deployed_index_ref.DeployedIndexRef, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py new file mode 100644 index 0000000000..9c093db096 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'IndexEndpoint', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexPrivateEndpoints', + }, +) + + +class IndexEndpoint(proto.Message): + r"""Indexes are deployed into it. An IndexEndpoint can have + multiple DeployedIndexes. + + Attributes: + name (str): + Output only. The resource name of the + IndexEndpoint. + display_name (str): + Required. The display name of the + IndexEndpoint. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the IndexEndpoint. + deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]): + Output only. The indexes deployed in this + endpoint. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your IndexEndpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was last updated. This timestamp + is not updated when the endpoint's + DeployedIndexes are updated, e.g. due to updates + of the original Indexes they are the deployments + of. + network (str): + Optional. The full name of the Google Compute Engine + `network `__ + to which the IndexEndpoint should be peered. 
+ + Private services access must already be configured for the + network. If left unspecified, the Endpoint is not peered + with any network. + + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + and + [private_service_connect_config][google.cloud.aiplatform.v1beta1.IndexEndpoint.private_service_connect_config] + are mutually exclusive. + + `Format `__: + projects/{project}/global/networks/{network}. Where + {project} is a project number, as in '12345', and {network} + is network name. + enable_private_service_connect (bool): + Optional. Deprecated: If true, expose the IndexEndpoint via + private service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], + can be set. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='DeployedIndex', + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + create_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + network = proto.Field( + proto.STRING, + number=9, + ) + enable_private_service_connect = proto.Field( + proto.BOOL, + number=10, + ) + + +class DeployedIndex(proto.Message): + r"""A deployment of an Index. IndexEndpoints contain one or more + DeployedIndexes. + + Attributes: + id (str): + Required. The user specified ID of the + DeployedIndex. The ID can be up to 128 + characters long and must start with a letter and + only contain letters, numbers, and underscores. 
+ The ID must be unique within the project it is + created in. + index (str): + Required. The name of the Index this is the + deployment of. We may refer to this Index as the + DeployedIndex's "original" Index. + display_name (str): + The display name of the DeployedIndex. If not provided upon + creation, the Index's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedIndex + was created. + private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints): + Output only. Provides paths for users to send requests + directly to the deployed index services running on Cloud via + private services access. This field is populated if + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + is configured. + index_sync_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The DeployedIndex may depend on various data on + its original Index. Additionally when certain changes to the + original Index are being done (e.g. when what the Index + contains is being changed) the DeployedIndex may be + asynchronously updated in the background to reflect this + changes. If this timestamp's value is at least the + [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] + of the original Index, it means that this DeployedIndex and + the original Index are in sync. If this timestamp is older, + then to see which updates this DeployedIndex already + contains (and which not), one must + [list][Operations.ListOperations] [Operations][Operation] + [working][Operation.name] on the original Index. Only the + successfully completed Operations with + [Operations.metadata.generic_metadata.update_time] + [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] + equal or before this sync time are contained in this + DeployedIndex. + automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): + Optional. 
A description of resources that the DeployedIndex
+ uses, which to a large degree are decided by Vertex AI, and
+ optionally allows only a modest additional configuration. If
+ min_replica_count is not set, the default value is 2 (we
+ don't provide SLA when min_replica_count=1). If
+ max_replica_count is not set, the default value is
+ min_replica_count. The max allowed replica count is 1000.
+ dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources):
+ Optional. A description of resources that are dedicated to
+ the DeployedIndex, and that need a higher degree of manual
+ configuration. If min_replica_count is not set, the default
+ value is 2 (we don't provide SLA when min_replica_count=1).
+ If max_replica_count is not set, the default value is
+ min_replica_count. The max allowed replica count is 1000.
+
+ Available machine types: n1-standard-16 n1-standard-32
+ enable_access_logging (bool):
+ Optional. If true, private endpoint's access
+ logs are sent to StackDriver Logging.
+ These logs are like standard server access logs,
+ containing information like timestamp and
+ latency for each MatchRequest.
+ Note that Stackdriver logs may incur a cost,
+ especially if the deployed index receives a high
+ queries per second rate (QPS). Estimate your
+ costs before enabling this option.
+ deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig):
+ Optional. If set, the authentication is
+ enabled for the private endpoint.
+ reserved_ip_ranges (Sequence[str]):
+ Optional. A list of reserved ip ranges under
+ the VPC network that can be used for this
+ DeployedIndex.
+ If set, we will deploy the index within the
+ provided ip ranges. Otherwise, the index might
+ be deployed to any ip ranges under the provided
+ VPC network.
+
+ The value should be the name of the address
+ (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
+ Example: 'vertex-ai-ip-range'.
+ deployment_group (str):
+ Optional. 
The deployment group can be no longer than 64 + characters (eg: 'test', 'prod'). If not set, we will use the + 'default' deployment group. + + Creating ``deployment_groups`` with ``reserved_ip_ranges`` + is a recommended practice when the peered network has + multiple peering ranges. This creates your deployments from + predictable IP spaces for easier traffic administration. + Also, one deployment_group (except 'default') can only be + used with the same reserved_ip_ranges which means if the + deployment_group has been used with reserved_ip_ranges: [a, + b, c], using it with [a, b] or [d, e] is disallowed. + + Note: we only support up to 5 deployment groups(not + including 'default'). + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + index = proto.Field( + proto.STRING, + number=2, + ) + display_name = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + private_endpoints = proto.Field( + proto.MESSAGE, + number=5, + message='IndexPrivateEndpoints', + ) + index_sync_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + automatic_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.AutomaticResources, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=16, + message=machine_resources.DedicatedResources, + ) + enable_access_logging = proto.Field( + proto.BOOL, + number=8, + ) + deployed_index_auth_config = proto.Field( + proto.MESSAGE, + number=9, + message='DeployedIndexAuthConfig', + ) + reserved_ip_ranges = proto.RepeatedField( + proto.STRING, + number=10, + ) + deployment_group = proto.Field( + proto.STRING, + number=11, + ) + + +class DeployedIndexAuthConfig(proto.Message): + r"""Used to set up the auth on the DeployedIndex's private + endpoint. 
+ + Attributes: + auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider): + Defines the authentication provider that the + DeployedIndex uses. + """ + + class AuthProvider(proto.Message): + r"""Configuration for an authentication provider, including support for + `JSON Web Token + (JWT) `__. + + Attributes: + audiences (Sequence[str]): + The list of JWT + `audiences `__. + that are allowed to access. A JWT containing any of these + audiences will be accepted. + allowed_issuers (Sequence[str]): + A list of allowed JWT issuers. Each entry must be a valid + Google service account, in the following format: + + ``service-account-name@project-id.iam.gserviceaccount.com`` + """ + + audiences = proto.RepeatedField( + proto.STRING, + number=1, + ) + allowed_issuers = proto.RepeatedField( + proto.STRING, + number=2, + ) + + auth_provider = proto.Field( + proto.MESSAGE, + number=1, + message=AuthProvider, + ) + + +class IndexPrivateEndpoints(proto.Message): + r"""IndexPrivateEndpoints proto is used to provide paths for users to + send requests via private endpoints (e.g. private service access, + private service connect). To send request via private service + access, use match_grpc_address. To send request via private service + connect, use service_attachment. + + Attributes: + match_grpc_address (str): + Output only. The ip address used to send + match gRPC requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
+ """ + + match_grpc_address = proto.Field( + proto.STRING, + number=1, + ) + service_attachment = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py new file mode 100644 index 0000000000..708b36a5ba --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateIndexEndpointRequest', + 'CreateIndexEndpointOperationMetadata', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'UpdateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'DeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UndeployIndexOperationMetadata', + 'MutateDeployedIndexRequest', + 'MutateDeployedIndexResponse', + 'MutateDeployedIndexOperationMetadata', + }, +) + + +class CreateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index_endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.IndexEndpoint, + ) + + +class CreateIndexEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=operation.GenericOperationMetadata,
+ )
+
+
+class GetIndexEndpointRequest(proto.Message):
+ r"""Request message for
+ [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint]
+
+ Attributes:
+ name (str):
+ Required. The name of the IndexEndpoint resource. Format:
+ ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListIndexEndpointsRequest(proto.Message):
+ r"""Request message for
+ [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location from which to
+ list the IndexEndpoints. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ Optional. An expression for filtering the results of the
+ request. For field names both snake_case and camelCase are
+ supported.
+
+ - ``index_endpoint`` supports = and !=. ``index_endpoint``
+ represents the IndexEndpoint ID, ie. the last segment of
+ the IndexEndpoint's
+ [resourcename][google.cloud.aiplatform.v1beta1.IndexEndpoint.name].
+ - ``display_name`` supports =, != and regex() (uses
+ `re2 `__
+ syntax)
+ - ``labels`` supports general map functions, that is:
+ ``labels.key=value`` - key:value equality;
+ ``labels.key:*`` or ``labels:key`` - key existence.
+ A key including a space must be quoted:
+ ``labels."a key"``.
+
+ Some examples:
+
+ - ``index_endpoint="1"``
+ - ``display_name="myDisplayName"``
+ - ``regex(display_name, "^A")`` -> The display name starts
+ with an A.
+ - ``labels.myKey="myValue"``
+ page_size (int):
+ Optional. The standard list page size.
+ page_token (str):
+ Optional. The standard list page token. 
Typically obtained + via + [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token] + of the previous + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListIndexEndpointsResponse(proto.Message): + r"""Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Attributes: + index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]): + List of IndexEndpoints in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + index_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.IndexEndpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + + Attributes: + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint which replaces + the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + index_endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.IndexEndpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be created + within the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been deployed in + the IndexEndpoint. 
+ """ + + deployed_index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +class UndeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource from which + to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index_id (str): + Required. The ID of the DeployedIndex to be + undeployed from the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +class UndeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + + +class UndeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class MutateDeployedIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been updated in + the IndexEndpoint. + """ + + deployed_index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py new file mode 100644 index 0000000000..7a850f4f96 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateIndexRequest', + 'CreateIndexOperationMetadata', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'UpdateIndexRequest', + 'UpdateIndexOperationMetadata', + 'DeleteIndexRequest', + 'NearestNeighborSearchOperationMetadata', + }, +) + + +class CreateIndexRequest(proto.Message): + r"""Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. 
+ + Attributes: + parent (str): + Required. The resource name of the Location to create the + Index in. Format: + ``projects/{project}/locations/{location}`` + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index.Index, + ) + + +class CreateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class GetIndexRequest(proto.Message): + r"""Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + + Attributes: + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListIndexesRequest(proto.Message): + r"""Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Indexes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. 
+ page_token (str): + The standard list page token. Typically obtained via + [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] + of the previous + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListIndexesResponse(proto.Message): + r"""Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + indexes (Sequence[google.cloud.aiplatform_v1beta1.types.Index]): + List of indexes in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + indexes = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateIndexRequest(proto.Message): + r"""Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index which updates the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
+ """ + + index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class DeleteIndexRequest(proto.Message): + r"""Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + + Attributes: + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class NearestNeighborSearchOperationMetadata(proto.Message): + r"""Runtime operation metadata with regard to Matching Engine + Index. + + Attributes: + content_validation_stats (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): + The validation stats of the content (per file) to be + inserted or updated on the Matching Engine Index resource. + Populated if contentsDeltaUri is provided as part of + [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. 
+ Please note that, currently for those files that are broken
+ or have an unsupported file format, we will not have the stats
+ for those files.
+ data_bytes_count (int):
+ The ingested data size in bytes.
+ """
+
+ class RecordError(proto.Message):
+ r"""
+
+ Attributes:
+ error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType):
+ The error type of this record.
+ error_message (str):
+ A human-readable message that is shown to the user to help
+ them fix the error. Note that this message may change from
+ time to time, your code should check against error_type as
+ the source of truth.
+ source_gcs_uri (str):
+ Cloud Storage URI pointing to the original
+ file in user's bucket.
+ embedding_id (str):
+ Empty if the embedding id failed to parse.
+ raw_record (str):
+ The original content of this record.
+ """
+ class RecordErrorType(proto.Enum):
+ r""""""
+ ERROR_TYPE_UNSPECIFIED = 0
+ EMPTY_LINE = 1
+ INVALID_JSON_SYNTAX = 2
+ INVALID_CSV_SYNTAX = 3
+ INVALID_AVRO_SYNTAX = 4
+ INVALID_EMBEDDING_ID = 5
+ EMBEDDING_SIZE_MISMATCH = 6
+ NAMESPACE_MISSING = 7
+
+ error_type = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType',
+ )
+ error_message = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_gcs_uri = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ embedding_id = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ raw_record = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+
+ class ContentValidationStats(proto.Message):
+ r"""
+
+ Attributes:
+ source_gcs_uri (str):
+ Cloud Storage URI pointing to the original
+ file in user's bucket.
+ valid_record_count (int):
+ Number of records in this file that were
+ successfully processed.
+ invalid_record_count (int):
+ Number of records in this file we skipped due
+ to validation errors. 
+ partial_errors (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError]): + The detail information of the partial + failures encountered for those invalid records + that couldn't be parsed. Up to 50 partial errors + will be reported. + """ + + source_gcs_uri = proto.Field( + proto.STRING, + number=1, + ) + valid_record_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_record_count = proto.Field( + proto.INT64, + number=3, + ) + partial_errors = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='NearestNeighborSearchOperationMetadata.RecordError', + ) + + content_validation_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=ContentValidationStats, + ) + data_bytes_count = proto.Field( + proto.INT64, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py new file mode 100644 index 0000000000..13499cff15 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'AvroSource', + 'CsvSource', + 'GcsSource', + 'GcsDestination', + 'BigQuerySource', + 'BigQueryDestination', + 'CsvDestination', + 'TFRecordDestination', + 'ContainerRegistryDestination', + }, +) + + +class AvroSource(proto.Message): + r"""The storage details for Avro input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class CsvSource(proto.Message): + r"""The storage details for CSV input content. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + + Attributes: + uris (Sequence[str]): + Required. Google Cloud Storage URI(-s) to the + input file(s). May contain wildcards. For more + information on wildcards, see + https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + """ + + uris = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output + directory. If the uri doesn't end with '/', a + '/' will be automatically appended. The + directory is created if it doesn't exist. + """ + + output_uri_prefix = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQuerySource(proto.Message): + r"""The BigQuery location for the input content. + + Attributes: + input_uri (str): + Required. BigQuery URI to a table, up to 2000 characters + long. Accepted forms: + + - BigQuery path. 
For example: + ``bq://projectId.bqDatasetId.bqTableId``. + """ + + input_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQueryDestination(proto.Message): + r"""The BigQuery location for the output content. + + Attributes: + output_uri (str): + Required. BigQuery URI to a project or table, up to 2000 + characters long. + + When only the project is specified, the Dataset and Table is + created. When the full table reference is specified, the + Dataset must exist and table must not exist. + + Accepted forms: + + - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId`` or + ``bq://projectId.bqDatasetId.bqTableId``. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class CsvDestination(proto.Message): + r"""The storage details for CSV output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class TFRecordDestination(proto.Message): + r"""The storage details for TFRecord output content. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class ContainerRegistryDestination(proto.Message): + r"""The Container Registry location for the container image. + + Attributes: + output_uri (str): + Required. Container Registry URI of a container image. Only + Google Container Registry and Artifact Registry are + supported now. Accepted forms: + + - Google Container Registry path. For example: + ``gcr.io/projectId/imageName:tag``. + + - Artifact Registry path. For example: + ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. + + If a tag is not specified, "latest" will be used as the + default tag. 
+ """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py new file mode 100644 index 0000000000..2a4abf511d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -0,0 +1,1093 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateCustomJobRequest', + 'GetCustomJobRequest', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'DeleteCustomJobRequest', + 'CancelCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'GetDataLabelingJobRequest', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'DeleteDataLabelingJobRequest', + 'CancelDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'DeleteHyperparameterTuningJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'GetBatchPredictionJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'DeleteBatchPredictionJobRequest', + 'CancelBatchPredictionJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'GetModelDeploymentMonitoringJobRequest', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'UpdateModelDeploymentMonitoringJobRequest', + 
'DeleteModelDeploymentMonitoringJobRequest', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + }, +) + + +class CreateCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + CustomJob in. Format: + ``projects/{project}/locations/{location}`` + custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): + Required. The CustomJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + custom_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_custom_job.CustomJob, + ) + + +class GetCustomJobRequest(proto.Message): + r"""Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListCustomJobsRequest(proto.Message): + r"""Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
Typically obtained via + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] + of the previous + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListCustomJobsResponse(proto.Message): + r"""Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Attributes: + custom_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob]): + List of CustomJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + custom_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_custom_job.CustomJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteCustomJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource to be deleted. + Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. 
+ + Attributes: + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): + Required. The DataLabelingJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + data_labeling_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_data_labeling_job.DataLabelingJob, + ) + + +class GetDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDataLabelingJobsRequest(proto.Message): + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. 
+ page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. FieldMask represents a + set of symbolic field paths. For example, the mask can be + ``paths: "name"``. The "name" here is a field in + DataLabelingJob. If this field is not set, all fields of the + DataLabelingJob are returned. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order by default. Use ``desc`` after a field name + for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataLabelingJobsResponse(proto.Message): + r"""Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Attributes: + data_labeling_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob]): + A list of DataLabelingJobs that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_labeling_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_data_labeling_job.DataLabelingJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + hyperparameter_tuning_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + + +class GetHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListHyperparameterTuningJobsRequest(proto.Message): + r"""Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. 
+ + Attributes: + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] + of the previous + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListHyperparameterTuningJobsResponse(proto.Message): + r"""Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + + Attributes: + hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob]): + List of HyperparameterTuningJobs in the requested page. + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] + of the jobs will be not be returned. + next_page_token (str): + A token to retrieve the next page of results. 
Pass to + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + hyperparameter_tuning_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource + to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob to cancel. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): + Required. The BatchPredictionJob to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + batch_prediction_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + + +class GetBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListBatchPredictionJobsRequest(proto.Message): + r"""Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + - ``model_display_name`` supports = and != + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] + of the previous + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListBatchPredictionJobsResponse(proto.Message): + r"""Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Attributes: + batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob]): + List of BatchPredictionJobs in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + batch_prediction_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob to cancel. 
+ Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The ModelDeploymentMonitoringJob to + create + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): + r"""Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + model_deployment_monitoring_job (str): + Required. ModelDeploymentMonitoring Job resource name. + Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + deployed_model_id (str): + Required. The DeployedModel ID of the + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + feature_display_name (str): + The feature display name. If specified, only return the + stats belonging to this feature. Format: + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name], + example: "user_destination". 
+ objectives (Sequence[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]): + Required. Objectives of the stats to + retrieve. + page_size (int): + The standard list page size. + page_token (str): + A page token received from a previous + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The earliest timestamp of stats being + generated. If not set, indicates fetching stats + till the earliest possible one. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The latest timestamp of stats being + generated. If not set, indicates feching stats + till the latest possible one. + """ + + class StatsAnomaliesObjective(proto.Message): + r"""Stats requested for specific objective. + + Attributes: + type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): + + top_feature_count (int): + If set, all attribution scores between + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] + and + [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] + are fetched, and page token doesn't take affect in this + case. Only used to retrieve attribution score for the top + Features which has the highest attribution score in the + latest monitoring run. 
+ """ + + type_ = proto.Field( + proto.ENUM, + number=1, + enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType, + ) + top_feature_count = proto.Field( + proto.INT32, + number=4, + ) + + model_deployment_monitoring_job = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + feature_display_name = proto.Field( + proto.STRING, + number=3, + ) + objectives = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=StatsAnomaliesObjective, + ) + page_size = proto.Field( + proto.INT32, + number=5, + ) + page_token = proto.Field( + proto.STRING, + number=6, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message): + r"""Response message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies]): + Stats retrieved for requested objectives. There are at most + 1000 + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] + in the response. + next_page_token (str): + The page token that can be used by the next + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. 
+ """ + + @property + def raw_page(self): + return self + + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelDeploymentMonitoringJobsRequest(proto.Message): + r"""Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelDeploymentMonitoringJobsResponse(proto.Message): + r"""Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. 
+ + Attributes: + model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob]): + A list of ModelDeploymentMonitoringJobs that + matches the specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + model_deployment_monitoring_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring configuration + which replaces the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask is used to specify the fields to + be overwritten in the ModelDeploymentMonitoringJob resource + by the update. The fields specified in the update_mask are + relative to the resource, not the full request. A field will + be overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in the + request will be overwritten. Set the update_mask to ``*`` to + override all fields. For the objective config, the user can + either provide the update mask for + model_deployment_monitoring_objective_configs or any + combination of its nested fields, such as: + model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
+ + Updatable fields: + + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + """ + + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the model monitoring job to + delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class PauseModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. 
Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ResumeModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): + r"""Runtime operation information for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py new file mode 100644 index 0000000000..6016286a66 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'JobState', + }, +) + + +class JobState(proto.Enum): + r"""Describes the state of a job.""" + JOB_STATE_UNSPECIFIED = 0 + JOB_STATE_QUEUED = 1 + JOB_STATE_PENDING = 2 + JOB_STATE_RUNNING = 3 + JOB_STATE_SUCCEEDED = 4 + JOB_STATE_FAILED = 5 + JOB_STATE_CANCELLING = 6 + JOB_STATE_CANCELLED = 7 + JOB_STATE_PAUSED = 8 + JOB_STATE_EXPIRED = 9 + JOB_STATE_UPDATING = 10 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py new file mode 100644 index 0000000000..5fc6d9c32d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'LineageSubgraph', + }, +) + + +class LineageSubgraph(proto.Message): + r"""A subgraph of the overall lineage graph. Event edges connect + Artifact and Execution nodes. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifact nodes in the subgraph. + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Execution nodes in the subgraph. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Event edges between Artifacts and + Executions in the subgraph. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + executions = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=execution.Execution, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=event.Event, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py new file mode 100644 index 0000000000..082f3cbfec --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import accelerator_type as gca_accelerator_type + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MachineSpec', + 'DedicatedResources', + 'AutomaticResources', + 'BatchDedicatedResources', + 'ResourcesConsumed', + 'DiskSpec', + 'NfsMount', + 'AutoscalingMetricSpec', + }, +) + + +class MachineSpec(proto.Message): + r"""Specification of a single machine. + + Attributes: + machine_type (str): + Immutable. The type of the machine. + + See the `list of machine types supported for + prediction `__ + + See the `list of machine types supported for custom + training `__. + + For + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + this field is optional, and the default value is + ``n1-standard-2``. For + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] + or as part of + [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] + this field is required. + accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType): + Immutable. The type of accelerator(s) that may be attached + to the machine as per + [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]. + accelerator_count (int): + The number of accelerators to attach to the + machine. 
+ """ + + machine_type = proto.Field( + proto.STRING, + number=1, + ) + accelerator_type = proto.Field( + proto.ENUM, + number=2, + enum=gca_accelerator_type.AcceleratorType, + ) + accelerator_count = proto.Field( + proto.INT32, + number=3, + ) + + +class DedicatedResources(proto.Message): + r"""A description of resources that are dedicated to a + DeployedModel, and that need a higher degree of manual + configuration. + + Attributes: + machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): + Required. Immutable. The specification of a + single machine used by the prediction. + min_replica_count (int): + Required. Immutable. The minimum number of + machine replicas this DeployedModel will be + always deployed on. This value must be greater + than or equal to 1. + If traffic against the DeployedModel increases, + it may dynamically be deployed onto more + replicas, and as traffic decreases, some of + these extra replicas may be freed. + max_replica_count (int): + Immutable. The maximum number of replicas this DeployedModel + may be deployed on when the traffic against it increases. If + the requested value is too large, the deployment will error, + but if deployment succeeds then the ability to scale the + model to that many replicas is guaranteed (barring service + outages). If traffic against the DeployedModel increases + beyond what its replicas at maximum may handle, a portion of + the traffic will be dropped. If this value is not provided, + will use + [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] + as the default value. + + The value of this field impacts the charge against Vertex + CPU and GPU quotas. Specifically, you will be charged for + (max_replica_count \* number of cores in the selected + machine type) and (max_replica_count \* number of GPUs per + replica in the selected machine type). + autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]): + Immutable. 
The metric specifications that overrides a + resource utilization metric (CPU utilization, accelerator's + duty cycle, and so on) target value (default to 60 if not + set). At most one entry is allowed per metric. + + If + [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] + is above 0, the autoscaling will be based on both CPU + utilization and accelerator's duty cycle metrics and scale + up when either metrics exceeds its target value while scale + down if both metrics are under their target value. The + default target value is 60 for both metrics. + + If + [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] + is 0, the autoscaling will be based on CPU utilization + metric only with default target value 60 if not explicitly + set. + + For example, in the case of Online Prediction, if you want + to override target CPU utilization to 80, you should set + [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name] + to + ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + and + [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target] + to ``80``. + """ + + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message='MachineSpec', + ) + min_replica_count = proto.Field( + proto.INT32, + number=2, + ) + max_replica_count = proto.Field( + proto.INT32, + number=3, + ) + autoscaling_metric_specs = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='AutoscalingMetricSpec', + ) + + +class AutomaticResources(proto.Message): + r"""A description of resources that to large degree are decided + by Vertex AI, and require only a modest additional + configuration. Each Model supporting these resources documents + its specific guidelines. + + Attributes: + min_replica_count (int): + Immutable. The minimum number of replicas this DeployedModel + will be always deployed on. 
If traffic against it increases, + it may dynamically be deployed onto more replicas up to + [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], + and as traffic decreases, some of these extra replicas may + be freed. If the requested value is too large, the + deployment will error. + max_replica_count (int): + Immutable. The maximum number of replicas + this DeployedModel may be deployed on when the + traffic against it increases. If the requested + value is too large, the deployment will error, + but if deployment succeeds then the ability to + scale the model to that many replicas is + guaranteed (barring service outages). If traffic + against the DeployedModel increases beyond what + its replicas at maximum may handle, a portion of + the traffic will be dropped. If this value is + not provided, a no upper bound for scaling under + heavy traffic will be assume, though Vertex AI + may be unable to scale beyond certain replica + number. + """ + + min_replica_count = proto.Field( + proto.INT32, + number=1, + ) + max_replica_count = proto.Field( + proto.INT32, + number=2, + ) + + +class BatchDedicatedResources(proto.Message): + r"""A description of resources that are used for performing batch + operations, are dedicated to a Model, and need manual + configuration. + + Attributes: + machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): + Required. Immutable. The specification of a + single machine. + starting_replica_count (int): + Immutable. The number of machine replicas used at the start + of the batch operation. If not set, Vertex AI decides + starting number, not greater than + [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] + max_replica_count (int): + Immutable. The maximum number of machine + replicas the batch operation may be scaled to. + The default value is 10. 
+ """ + + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message='MachineSpec', + ) + starting_replica_count = proto.Field( + proto.INT32, + number=2, + ) + max_replica_count = proto.Field( + proto.INT32, + number=3, + ) + + +class ResourcesConsumed(proto.Message): + r"""Statistics information about resource consumption. + + Attributes: + replica_hours (float): + Output only. The number of replica hours + used. Note that many replicas may run in + parallel, and additionally any given work may be + queued for some time. Therefore this value is + not strictly related to wall time. + """ + + replica_hours = proto.Field( + proto.DOUBLE, + number=1, + ) + + +class DiskSpec(proto.Message): + r"""Represents the spec of disk options. + + Attributes: + boot_disk_type (str): + Type of the boot disk (default is "pd-ssd"). + Valid values: "pd-ssd" (Persistent Disk Solid + State Drive) or "pd-standard" (Persistent Disk + Hard Disk Drive). + boot_disk_size_gb (int): + Size in GB of the boot disk (default is + 100GB). + """ + + boot_disk_type = proto.Field( + proto.STRING, + number=1, + ) + boot_disk_size_gb = proto.Field( + proto.INT32, + number=2, + ) + + +class NfsMount(proto.Message): + r"""Represents a mount configuration for Network File System + (NFS) to mount. + + Attributes: + server (str): + Required. IP address of the NFS server. + path (str): + Required. Source path exported from NFS server. Has to start + with '/', and combined with the ip address, it indicates the + source mount path in the form of ``server:path`` + mount_point (str): + Required. Destination mount path. 
The NFS will be mounted + for the user under /mnt/nfs/ + """ + + server = proto.Field( + proto.STRING, + number=1, + ) + path = proto.Field( + proto.STRING, + number=2, + ) + mount_point = proto.Field( + proto.STRING, + number=3, + ) + + +class AutoscalingMetricSpec(proto.Message): + r"""The metric specification that defines the target resource + utilization (CPU utilization, accelerator's duty cycle, and so + on) for calculating the desired replica count. + + Attributes: + metric_name (str): + Required. The resource metric name. Supported metrics: + + - For Online Prediction: + - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` + - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + target (int): + The target resource utilization in percentage + (1% - 100%) for the given metric; once the real + usage deviates from the target by a certain + percentage, the machine replicas change. The + default value is 60 (representing 60%) if not + provided. + """ + + metric_name = proto.Field( + proto.STRING, + number=1, + ) + target = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py new file mode 100644 index 0000000000..c721537483 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ManualBatchTuningParameters', + }, +) + + +class ManualBatchTuningParameters(proto.Message): + r"""Manual batch tuning parameters. + + Attributes: + batch_size (int): + Immutable. The number of the records (e.g. + instances) of the operation given in each batch + to a machine replica. Machine type, and size of + a single record should be considered when + setting this parameter, higher value speeds up + the batch operation's execution, but too high + value will result in a whole batch not fitting + in a machine's memory, and the whole operation + will fail. + The default value is 64. + """ + + batch_size = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py new file mode 100644 index 0000000000..b2facccadf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MetadataSchema', + }, +) + + +class MetadataSchema(proto.Message): + r"""Instance of a general MetadataSchema. + + Attributes: + name (str): + Output only. The resource name of the + MetadataSchema. + schema_version (str): + The version of the MetadataSchema. The version's format must + match the following regular expression: + ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to + order/compare different versions. Example: 1.0.0, 1.0.1, + etc. + schema (str): + Required. The raw YAML string representation of the + MetadataSchema. The combination of [MetadataSchema.version] + and the schema name given by ``title`` in + [MetadataSchema.schema] must be unique within a + MetadataStore. + + The schema is defined as an OpenAPI 3.0.2 `MetadataSchema + Object `__ + schema_type (google.cloud.aiplatform_v1beta1.types.MetadataSchema.MetadataSchemaType): + The type of the MetadataSchema. This is a + property that identifies which metadata types + will use the MetadataSchema. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataSchema was created. 
+ description (str): + Description of the Metadata Schema + """ + class MetadataSchemaType(proto.Enum): + r"""Describes the type of the MetadataSchema.""" + METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 + ARTIFACT_TYPE = 1 + EXECUTION_TYPE = 2 + CONTEXT_TYPE = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + schema_version = proto.Field( + proto.STRING, + number=2, + ) + schema = proto.Field( + proto.STRING, + number=3, + ) + schema_type = proto.Field( + proto.ENUM, + number=4, + enum=MetadataSchemaType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + description = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py new file mode 100644 index 0000000000..69d5a4bf24 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -0,0 +1,1479 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateMetadataStoreRequest', + 'CreateMetadataStoreOperationMetadata', + 'GetMetadataStoreRequest', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'DeleteMetadataStoreRequest', + 'DeleteMetadataStoreOperationMetadata', + 'CreateArtifactRequest', + 'GetArtifactRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'UpdateArtifactRequest', + 'DeleteArtifactRequest', + 'PurgeArtifactsRequest', + 'PurgeArtifactsResponse', + 'PurgeArtifactsMetadata', + 'CreateContextRequest', + 'GetContextRequest', + 'ListContextsRequest', + 'ListContextsResponse', + 'UpdateContextRequest', + 'DeleteContextRequest', + 'PurgeContextsRequest', + 'PurgeContextsResponse', + 'PurgeContextsMetadata', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'QueryContextLineageSubgraphRequest', + 'CreateExecutionRequest', + 'GetExecutionRequest', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'UpdateExecutionRequest', + 'DeleteExecutionRequest', + 'PurgeExecutionsRequest', + 'PurgeExecutionsResponse', + 'PurgeExecutionsMetadata', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'QueryExecutionInputsAndOutputsRequest', + 
'CreateMetadataSchemaRequest', + 'GetMetadataSchemaRequest', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'QueryArtifactLineageSubgraphRequest', + }, +) + + +class CreateMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + parent (str): + Required. The resource name of the Location where the + MetadataStore should be created. Format: + ``projects/{project}/locations/{location}/`` + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to create. + metadata_store_id (str): + The {metadatastore} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataStore.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + metadata_store = proto.Field( + proto.MESSAGE, + number=2, + message=gca_metadata_store.MetadataStore, + ) + metadata_store_id = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for creating a + MetadataStore. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the MetadataStore to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMetadataStoresRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + parent (str): + Required. The Location whose MetadataStores should be + listed. Format: ``projects/{project}/locations/{location}`` + page_size (int): + The maximum number of Metadata Stores to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMetadataStoresResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + metadata_stores (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataStore]): + The MetadataStores found for the Location. 
+ next_page_token (str): + A token, which can be sent as + [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_stores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_metadata_store.MetadataStore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the MetadataStore to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + force (bool): + Deprecated: Field is no longer supported. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class DeleteMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for deleting a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Artifact should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. 
The Artifact to create. + artifact_id (str): + The {artifact} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + If not provided, the Artifact's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Artifacts in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Artifact.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + artifact = proto.Field( + proto.MESSAGE, + number=2, + message=gca_artifact.Artifact, + ) + artifact_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + parent (str): + Required. The MetadataStore whose Artifacts should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Artifacts to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + The supported set of filters include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifacts retrieved from the + MetadataStore. 
+ next_page_token (str): + A token, which can be sent as + [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + + Attributes: + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not + found, a new + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is + created. + """ + + artifact = proto.Field( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact to delete. 
+ Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + etag (str): + Optional. The etag of the Artifact to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + etag = proto.Field( + proto.STRING, + number=2, + ) + + +class PurgeArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + + Attributes: + parent (str): + Required. The metadata store to purge Artifacts from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Artifacts to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Artifact names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + + Attributes: + purge_count (int): + The number of Artifacts that this request deleted (or, if + ``force`` is false, the number of Artifacts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Artifact names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). 
+ """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeArtifactsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for purging Artifacts. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateContextRequest(proto.Message): + r"""Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Context should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + context_id (str): + The {context} portion of the resource name with the format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. + If not provided, the Context's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Contexts in the parent MetadataStore. (Otherwise the request + will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the + caller can't view the preexisting Context.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + context = proto.Field( + proto.MESSAGE, + number=2, + message=gca_context.Context, + ) + context_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetContextRequest(proto.Message): + r"""Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. 
+ + Attributes: + name (str): + Required. The resource name of the Context to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListContextsRequest(proto.Message): + r"""Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + + Attributes: + parent (str): + Required. The MetadataStore whose Contexts should be listed. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Contexts to return. The + service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Contexts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + Following are the supported set of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0``. 
+ + - **Parent Child filtering**: To filter Contexts based on + parent-child relationship use the HAS operator as + follows: + + :: + + parent_contexts: + "projects//locations//metadataStores//contexts/" + child_contexts: + "projects//locations//metadataStores//contexts/" + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListContextsResponse(proto.Message): + r"""Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Attributes: + contexts (Sequence[google.cloud.aiplatform_v1beta1.types.Context]): + The Contexts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + contexts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_context.Context, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateContextRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + + Attributes: + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Context][google.cloud.aiplatform.v1beta1.Context] is not + found, a new + [Context][google.cloud.aiplatform.v1beta1.Context] is + created. + """ + + context = proto.Field( + proto.MESSAGE, + number=1, + message=gca_context.Context, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteContextRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. + + Attributes: + name (str): + Required. The resource name of the Context to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + force (bool): + The force deletion semantics is still + undefined. Users should not use this field. + etag (str): + Optional. The etag of the Context to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + etag = proto.Field( + proto.STRING, + number=3, + ) + + +class PurgeContextsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + + Attributes: + parent (str): + Required. The metadata store to purge Contexts from. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. A required filter matching the Contexts to be + purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Context names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeContextsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + + Attributes: + purge_count (int): + The number of Contexts that this request deleted (or, if + ``force`` is false, the number of Contexts that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Context names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeContextsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for purging Contexts. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class AddContextArtifactsAndExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + Attributes: + context (str): + Required. The resource name of the Context that the + Artifacts and Executions belong to. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + artifacts (Sequence[str]): + The resource names of the Artifacts to attribute to the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + executions (Sequence[str]): + The resource names of the Executions to associate with the + Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + artifacts = proto.RepeatedField( + proto.STRING, + number=2, + ) + executions = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class AddContextArtifactsAndExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + + +class AddContextChildrenRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + Attributes: + context (str): + Required. The resource name of the parent Context. + + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + child_contexts (Sequence[str]): + The resource names of the child Contexts. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + child_contexts = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class AddContextChildrenResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + + +class QueryContextLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. 
+ + Attributes: + context (str): + Required. The resource name of the Context whose Artifacts + and Executions should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + Execution should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution to create. + execution_id (str): + The {execution} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + If not provided, the Execution's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are ``/[a-z][0-9]-/``. Must be unique across all + Executions in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Execution.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.MESSAGE, + number=2, + message=gca_execution.Execution, + ) + execution_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + + Attributes: + name (str): + Required. 
The resource name of the Execution to retrieve. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + parent (str): + Required. The MetadataStore whose Executions should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of Executions to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with an INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Executions + to satisfy in order to be part of the result set. The syntax + to define filter query is based on + https://google.aip.dev/160. Following are the supported set + of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. 
+ - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Executions based + on the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Executions retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + executions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + + Attributes: + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. 
The Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A FieldMask indicating which fields + should be updated. Functionality of this field + is not yet supported. + allow_missing (bool): + If set to true, and the + [Execution][google.cloud.aiplatform.v1beta1.Execution] is + not found, a new + [Execution][google.cloud.aiplatform.v1beta1.Execution] is + created. + """ + + execution = proto.Field( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution to delete. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + etag (str): + Optional. The etag of the Execution to delete. If this is + provided, it must match the server's etag. Otherwise, the + request will fail with a FAILED_PRECONDITION. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + etag = proto.Field( + proto.STRING, + number=2, + ) + + +class PurgeExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + + Attributes: + parent (str): + Required. The metadata store to purge Executions from. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + filter (str): + Required. 
A required filter matching the Executions to be + purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. + force (bool): + Optional. Flag to indicate to actually perform the purge. If + ``force`` is set to false, the method will return a sample + of Execution names that would be deleted. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + force = proto.Field( + proto.BOOL, + number=3, + ) + + +class PurgeExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + + Attributes: + purge_count (int): + The number of Executions that this request deleted (or, if + ``force`` is false, the number of Executions that will be + deleted). This can be an estimate. + purge_sample (Sequence[str]): + A sample of the Execution names that will be deleted. Only + populated if ``force`` is set to false. The maximum number + of samples is 100 (it is possible to return fewer). + """ + + purge_count = proto.Field( + proto.INT64, + number=1, + ) + purge_sample = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class PurgeExecutionsMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for purging Executions. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class AddExecutionEventsRequest(proto.Message): + r"""Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + Attributes: + execution (str): + Required. The resource name of the Execution that the Events + connect Artifacts with. 
Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + """ + + execution = proto.Field( + proto.STRING, + number=1, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=event.Event, + ) + + +class AddExecutionEventsResponse(proto.Message): + r"""Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + + +class QueryExecutionInputsAndOutputsRequest(proto.Message): + r"""Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + + Attributes: + execution (str): + Required. The resource name of the Execution whose input and + output Artifacts should be retrieved as a LineageSubgraph. + Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` + """ + + execution = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. + + Attributes: + parent (str): + Required. The resource name of the MetadataStore where the + MetadataSchema should be created. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): + Required. The MetadataSchema to create. + metadata_schema_id (str): + The {metadata_schema} portion of the resource name with the + format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. 
Valid characters are ``/[a-z][0-9]-/``. Must be + unique across all MetadataSchemas in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataSchema.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + metadata_schema = proto.Field( + proto.MESSAGE, + number=2, + message=gca_metadata_schema.MetadataSchema, + ) + metadata_schema_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetMetadataSchemaRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + + Attributes: + name (str): + Required. The resource name of the MetadataSchema to + retrieve. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMetadataSchemasRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Attributes: + parent (str): + Required. The MetadataStore whose MetadataSchemas should be + listed. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` + page_size (int): + The maximum number of MetadataSchemas to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] + call. Provide this to retrieve the next page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + A query to filter available MetadataSchemas + for matching results. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListMetadataSchemasResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Attributes: + metadata_schemas (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataSchema]): + The MetadataSchemas found for the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_schemas = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_metadata_schema.MetadataSchema, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class QueryArtifactLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. + + Attributes: + artifact (str): + Required. The resource name of the Artifact whose Lineage + needs to be retrieved as a LineageSubgraph. Format: + ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + max_hops (int): + Specifies the size of the lineage graph in terms of number + of hops from the specified artifact. Negative Value: + INVALID_ARGUMENT error is returned 0: Only input artifact is + returned. 
No value: Transitive closure is performed to + return the complete graph. + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the Lineage Subgraph. The + syntax to define filter query is based on + https://google.aip.dev/160. The supported set of filters + include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"`` Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + artifact = proto.Field( + proto.STRING, + number=1, + ) + max_hops = proto.Field( + proto.INT32, + number=2, + ) + filter = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py new file mode 100644 index 0000000000..ac0a6d6e7c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MetadataStore', + }, +) + + +class MetadataStore(proto.Message): + r"""Instance of a metadata store. Contains a set of metadata that + can be queried. + + Attributes: + name (str): + Output only. The resource name of the + MetadataStore instance. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was last updated. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Metadata Store. If set, this Metadata Store and + all sub-resources of this Metadata Store are + secured using this key. + description (str): + Description of the MetadataStore. + state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): + Output only. State information of the + MetadataStore. + """ + + class MetadataStoreState(proto.Message): + r"""Represents state information for a MetadataStore. + + Attributes: + disk_utilization_bytes (int): + The disk utilization of the MetadataStore in + bytes. 
+ """ + + disk_utilization_bytes = proto.Field( + proto.INT64, + number=1, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=5, + message=gca_encryption_spec.EncryptionSpec, + ) + description = proto.Field( + proto.STRING, + number=6, + ) + state = proto.Field( + proto.MESSAGE, + number=7, + message=MetadataStoreState, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py new file mode 100644 index 0000000000..32f34fea5e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MigratableResource', + }, +) + + +class MigratableResource(proto.Message): + r"""Represents one resource that exists in automl.googleapis.com, + datalabeling.googleapis.com or ml.googleapis.com. 
+ + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + ml_engine_model_version (google.cloud.aiplatform_v1beta1.types.MigratableResource.MlEngineModelVersion): + Output only. Represents one Version in + ml.googleapis.com. + + This field is a member of `oneof`_ ``resource``. + automl_model (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlModel): + Output only. Represents one Model in + automl.googleapis.com. + + This field is a member of `oneof`_ ``resource``. + automl_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlDataset): + Output only. Represents one Dataset in + automl.googleapis.com. + + This field is a member of `oneof`_ ``resource``. + data_labeling_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset): + Output only. Represents one Dataset in + datalabeling.googleapis.com. + + This field is a member of `oneof`_ ``resource``. + last_migrate_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the last + migration attempt on this MigratableResource + started. Will not be set if there's no migration + attempt on this MigratableResource. + last_update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MigratableResource was last updated. + """ + + class MlEngineModelVersion(proto.Message): + r"""Represents one model Version in ml.googleapis.com. + + Attributes: + endpoint (str): + The ml.googleapis.com endpoint that this model Version + currently lives in. 
Example values: + + - ml.googleapis.com + - us-centrall-ml.googleapis.com + - europe-west4-ml.googleapis.com + - asia-east1-ml.googleapis.com + version (str): + Full resource name of ml engine model Version. Format: + ``projects/{project}/models/{model}/versions/{version}``. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + version = proto.Field( + proto.STRING, + number=2, + ) + + class AutomlModel(proto.Message): + r"""Represents one Model in automl.googleapis.com. + + Attributes: + model (str): + Full resource name of automl Model. Format: + ``projects/{project}/locations/{location}/models/{model}``. + model_display_name (str): + The Model's display name in + automl.googleapis.com. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + model_display_name = proto.Field( + proto.STRING, + number=3, + ) + + class AutomlDataset(proto.Message): + r"""Represents one Dataset in automl.googleapis.com. + + Attributes: + dataset (str): + Full resource name of automl Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}``. + dataset_display_name (str): + The Dataset's display name in + automl.googleapis.com. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=4, + ) + + class DataLabelingDataset(proto.Message): + r"""Represents one Dataset in datalabeling.googleapis.com. + + Attributes: + dataset (str): + Full resource name of data labeling Dataset. Format: + ``projects/{project}/datasets/{dataset}``. + dataset_display_name (str): + The Dataset's display name in + datalabeling.googleapis.com. + data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]): + The migratable AnnotatedDataset in + datalabeling.googleapis.com belongs to the data + labeling Dataset. 
+ """ + + class DataLabelingAnnotatedDataset(proto.Message): + r"""Represents one AnnotatedDataset in + datalabeling.googleapis.com. + + Attributes: + annotated_dataset (str): + Full resource name of data labeling AnnotatedDataset. + Format: + ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. + annotated_dataset_display_name (str): + The AnnotatedDataset's display name in + datalabeling.googleapis.com. + """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + annotated_dataset_display_name = proto.Field( + proto.STRING, + number=3, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=4, + ) + data_labeling_annotated_datasets = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', + ) + + ml_engine_model_version = proto.Field( + proto.MESSAGE, + number=1, + oneof='resource', + message=MlEngineModelVersion, + ) + automl_model = proto.Field( + proto.MESSAGE, + number=2, + oneof='resource', + message=AutomlModel, + ) + automl_dataset = proto.Field( + proto.MESSAGE, + number=3, + oneof='resource', + message=AutomlDataset, + ) + data_labeling_dataset = proto.Field( + proto.MESSAGE, + number=4, + oneof='resource', + message=DataLabelingDataset, + ) + last_migrate_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + last_update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py new file mode 100644 index 0000000000..079ad2015a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -0,0 +1,479 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migratable_resource as gca_migratable_resource +from google.cloud.aiplatform_v1beta1.types import operation +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'BatchMigrateResourcesRequest', + 'MigrateResourceRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceResponse', + 'BatchMigrateResourcesOperationMetadata', + }, +) + + +class SearchMigratableResourcesRequest(proto.Message): + r"""Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Attributes: + parent (str): + Required. The location that the migratable resources should + be searched from. It's the Vertex AI location that the + resources can be migrated to, not the resources' original + location. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + The standard page size. + The default and maximum value is 100. + page_token (str): + The standard page token. + filter (str): + A filter for your search. You can use the following types of + filters: + + - Resource type filters. 
The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: + + - ``ml_engine_model_version:*`` + - ``automl_model:*`` + - ``automl_dataset:*`` + - ``data_labeling_dataset:*`` + + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: + + - ``last_migrate_time:*`` filters for migrated + resources. + - ``NOT last_migrate_time:*`` filters for not yet + migrated resources. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class SearchMigratableResourcesResponse(proto.Message): + r"""Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Attributes: + migratable_resources (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource]): + All migratable resources that can be migrated + to the location specified in the request. + next_page_token (str): + The standard next-page token. The migratable_resources may + not fill page_size in SearchMigratableResourcesRequest even + when there are subsequent pages. + """ + + @property + def raw_page(self): + return self + + migratable_resources = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_migratable_resource.MigratableResource, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class BatchMigrateResourcesRequest(proto.Message): + r"""Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + parent (str): + Required. The location of the migrated resource will live + in. 
Format: ``projects/{project}/locations/{location}`` + migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): + Required. The request messages specifying the + resources to migrate. They must be in the same + location as the destination. Up to 50 resources + can be migrated in one batch. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + migrate_resource_requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='MigrateResourceRequest', + ) + + +class MigrateResourceRequest(proto.Message): + r"""Config of migrating one resource from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): + Config for migrating Version in + ml.googleapis.com to Vertex AI's Model. + + This field is a member of `oneof`_ ``request``. + migrate_automl_model_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlModelConfig): + Config for migrating Model in + automl.googleapis.com to Vertex AI's Model. + + This field is a member of `oneof`_ ``request``. + migrate_automl_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): + Config for migrating Dataset in + automl.googleapis.com to Vertex AI's Dataset. + + This field is a member of `oneof`_ ``request``. 
+ migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): + Config for migrating Dataset in + datalabeling.googleapis.com to Vertex AI's + Dataset. + + This field is a member of `oneof`_ ``request``. + """ + + class MigrateMlEngineModelVersionConfig(proto.Message): + r"""Config for migrating version in ml.googleapis.com to Vertex + AI's Model. + + Attributes: + endpoint (str): + Required. The ml.googleapis.com endpoint that this model + version should be migrated from. Example values: + + - ml.googleapis.com + + - us-centrall-ml.googleapis.com + + - europe-west4-ml.googleapis.com + + - asia-east1-ml.googleapis.com + model_version (str): + Required. Full resource name of ml engine model version. + Format: + ``projects/{project}/models/{model}/versions/{version}``. + model_display_name (str): + Required. Display name of the model in Vertex + AI. System will pick a display name if + unspecified. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + model_version = proto.Field( + proto.STRING, + number=2, + ) + model_display_name = proto.Field( + proto.STRING, + number=3, + ) + + class MigrateAutomlModelConfig(proto.Message): + r"""Config for migrating Model in automl.googleapis.com to Vertex + AI's Model. + + Attributes: + model (str): + Required. Full resource name of automl Model. Format: + ``projects/{project}/locations/{location}/models/{model}``. + model_display_name (str): + Optional. Display name of the model in Vertex + AI. System will pick a display name if + unspecified. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + model_display_name = proto.Field( + proto.STRING, + number=2, + ) + + class MigrateAutomlDatasetConfig(proto.Message): + r"""Config for migrating Dataset in automl.googleapis.com to + Vertex AI's Dataset. + + Attributes: + dataset (str): + Required. Full resource name of automl Dataset. 
Format: + ``projects/{project}/locations/{location}/datasets/{dataset}``. + dataset_display_name (str): + Required. Display name of the Dataset in + Vertex AI. System will pick a display name if + unspecified. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=2, + ) + + class MigrateDataLabelingDatasetConfig(proto.Message): + r"""Config for migrating Dataset in datalabeling.googleapis.com + to Vertex AI's Dataset. + + Attributes: + dataset (str): + Required. Full resource name of data labeling Dataset. + Format: ``projects/{project}/datasets/{dataset}``. + dataset_display_name (str): + Optional. Display name of the Dataset in + Vertex AI. System will pick a display name if + unspecified. + migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): + Optional. Configs for migrating + AnnotatedDataset in datalabeling.googleapis.com + to Vertex AI's SavedQuery. The specified + AnnotatedDatasets have to belong to the + datalabeling Dataset. + """ + + class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): + r"""Config for migrating AnnotatedDataset in + datalabeling.googleapis.com to Vertex AI's SavedQuery. + + Attributes: + annotated_dataset (str): + Required. Full resource name of data labeling + AnnotatedDataset. Format: + ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
+ """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=2, + ) + migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', + ) + + migrate_ml_engine_model_version_config = proto.Field( + proto.MESSAGE, + number=1, + oneof='request', + message=MigrateMlEngineModelVersionConfig, + ) + migrate_automl_model_config = proto.Field( + proto.MESSAGE, + number=2, + oneof='request', + message=MigrateAutomlModelConfig, + ) + migrate_automl_dataset_config = proto.Field( + proto.MESSAGE, + number=3, + oneof='request', + message=MigrateAutomlDatasetConfig, + ) + migrate_data_labeling_dataset_config = proto.Field( + proto.MESSAGE, + number=4, + oneof='request', + message=MigrateDataLabelingDatasetConfig, + ) + + +class BatchMigrateResourcesResponse(proto.Message): + r"""Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + migrate_resource_responses (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceResponse]): + Successfully migrated resources. + """ + + migrate_resource_responses = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='MigrateResourceResponse', + ) + + +class MigrateResourceResponse(proto.Message): + r"""Describes a successfully migrated resource. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dataset (str): + Migrated Dataset's resource name. 
+ + This field is a member of `oneof`_ ``migrated_resource``. + model (str): + Migrated Model's resource name. + + This field is a member of `oneof`_ ``migrated_resource``. + migratable_resource (google.cloud.aiplatform_v1beta1.types.MigratableResource): + Before migration, the identifier in + ml.googleapis.com, automl.googleapis.com or + datalabeling.googleapis.com. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + oneof='migrated_resource', + ) + model = proto.Field( + proto.STRING, + number=2, + oneof='migrated_resource', + ) + migratable_resource = proto.Field( + proto.MESSAGE, + number=3, + message=gca_migratable_resource.MigratableResource, + ) + + +class BatchMigrateResourcesOperationMetadata(proto.Message): + r"""Runtime operation information for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + partial_results (Sequence[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): + Partial results that reflect the latest + migration operation progress. + """ + + class PartialResult(proto.Message): + r"""Represents a partial result in batch migration operation for one + [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + error (google.rpc.status_pb2.Status): + The error result of the migration request in + case of failure. + + This field is a member of `oneof`_ ``result``. + model (str): + Migrated model resource name. 
+ + This field is a member of `oneof`_ ``result``. + dataset (str): + Migrated dataset resource name. + + This field is a member of `oneof`_ ``result``. + request (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest): + It's the same as the value in + [MigrateResourceRequest.migrate_resource_requests][]. + """ + + error = proto.Field( + proto.MESSAGE, + number=2, + oneof='result', + message=status_pb2.Status, + ) + model = proto.Field( + proto.STRING, + number=3, + oneof='result', + ) + dataset = proto.Field( + proto.STRING, + number=4, + oneof='result', + ) + request = proto.Field( + proto.MESSAGE, + number=1, + message='MigrateResourceRequest', + ) + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + partial_results = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=PartialResult, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py new file mode 100644 index 0000000000..2eafbe8897 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py @@ -0,0 +1,800 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import explanation +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Model', + 'PredictSchemata', + 'ModelContainerSpec', + 'Port', + }, +) + + +class Model(proto.Message): + r"""A trained machine learning Model. + + Attributes: + name (str): + The resource name of the Model. + version_id (str): + Output only. Immutable. The version ID of the + model. A new version is committed when a new + model version is uploaded or trained under an + existing model id. It is an auto-incrementing + decimal number in string representation. + version_aliases (Sequence[str]): + User provided version aliases so that a model version can be + referenced via alias (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_alias} + instead of auto-generated version id (i.e. + projects/{project}/locations/{location}/models/{model_id}@{version_id}). + The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to + distinguish from version_id. A default version alias will be + created for the first version of the model, and there must + be exactly one default version alias for a model. + version_create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this version was + created. + version_update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this version was + most recently updated. + display_name (str): + Required. The display name of the Model. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. 
+ description (str): + The description of the Model. + version_description (str): + The description of this version. + predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): + The schemata that describe formats of the Model's + predictions and explanations as given and returned via + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + and + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + metadata_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing additional information about the Model, + that is specific to it. Unset if the Model does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no additional metadata is needed, this field is set to an + empty string. Note: The URI given on output will be + immutable and probably different, including the URI scheme, + than the one given on input. The output URI will point to a + location where the user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + Immutable. An additional information about the Model; the + schema of the metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri]. + Unset if the Model does not have any additional information. + supported_export_formats (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat]): + Output only. The formats in which this Model + may be exported. If empty, this Model is not + available for export. + training_pipeline (str): + Output only. The resource name of the + TrainingPipeline that uploaded this Model, if + any. + container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec): + Input only. The specification of the container that is to be + used when deploying this Model. 
The specification is + ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], + and all binaries it contains are copied and stored + internally by Vertex AI. Not present for AutoML Models. + artifact_uri (str): + Immutable. The path to the directory + containing the Model artifact and any of its + supporting files. Not present for AutoML Models. + supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1beta1.types.Model.DeploymentResourcesType]): + Output only. When this Model is deployed, its prediction + resources are described by the ``prediction_resources`` + field of the + [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + object. Because not all Models support all resource + configuration types, the configuration types this Model + supports are listed here. If no configuration types are + listed, the Model cannot be deployed to an + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + does not support online predictions + ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). + Such a Model can serve predictions by using a + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], + if it has at least one entry each in + [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] + and + [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. + supported_input_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + If + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + exists, the instances should be given as per that schema. 
+ + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each instance is a + single line. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``csv`` The CSV format, where each instance is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record`` The TFRecord format, where each instance is + a single record in tfrecord syntax. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record-gzip`` Similar to ``tf-record``, but the file + is gzipped. Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. + + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] + object. + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + supported_output_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. 
+ If both + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + and + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] + exist, the predictions are returned together with their + instances. In other words, the prediction has the original + instance data first, followed by the actual prediction + content (as per the schema). + + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``bigquery`` Each prediction is a single row in a + BigQuery table, uses + [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] + . + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + uploaded into Vertex AI. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + most recently updated. 
+ deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModelRef]): + Output only. The pointers to DeployedModels + created from this Model. Note that Model could + have been deployed to Endpoints in different + Locations. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + The default explanation specification for this Model. + + The Model can be used for [requesting + explanation][PredictionService.Explain] after being + [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] + if it is populated. The Model can be used for [batch + explanation][BatchPredictionJob.generate_explanation] if it + is populated. + + All fields of the explanation_spec can be overridden by + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], + or + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + + If the default explanation specification is not set for this + Model, this Model can still be used for [requesting + explanation][PredictionService.Explain] by setting + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] + and for [batch + explanation][BatchPredictionJob.generate_explanation] by + setting + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Models. 
+ Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Model. If set, this Model and all sub-resources + of this Model will be secured by this key. + """ + class DeploymentResourcesType(proto.Enum): + r"""Identifies a type of Model's prediction resources.""" + DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 + DEDICATED_RESOURCES = 1 + AUTOMATIC_RESOURCES = 2 + + class ExportFormat(proto.Message): + r"""Represents export format supported by the Model. + All formats export to Google Cloud Storage. + + Attributes: + id (str): + Output only. The ID of the export format. The possible + format IDs are: + + - ``tflite`` Used for Android mobile devices. + + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. + + - ``tf-saved-model`` A tensorflow model in SavedModel + format. + + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - ``core-ml`` Used for iOS mobile devices. + + - ``custom-trained`` A Model that was uploaded or trained + by custom code. + exportable_contents (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat.ExportableContent]): + Output only. The content of this Model that + may be exported. 
+ """ + class ExportableContent(proto.Enum): + r"""The Model content that can be exported.""" + EXPORTABLE_CONTENT_UNSPECIFIED = 0 + ARTIFACT = 1 + IMAGE = 2 + + id = proto.Field( + proto.STRING, + number=1, + ) + exportable_contents = proto.RepeatedField( + proto.ENUM, + number=2, + enum='Model.ExportFormat.ExportableContent', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + version_id = proto.Field( + proto.STRING, + number=28, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=29, + ) + version_create_time = proto.Field( + proto.MESSAGE, + number=31, + message=timestamp_pb2.Timestamp, + ) + version_update_time = proto.Field( + proto.MESSAGE, + number=32, + message=timestamp_pb2.Timestamp, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + version_description = proto.Field( + proto.STRING, + number=30, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=4, + message='PredictSchemata', + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=5, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + supported_export_formats = proto.RepeatedField( + proto.MESSAGE, + number=20, + message=ExportFormat, + ) + training_pipeline = proto.Field( + proto.STRING, + number=7, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=9, + message='ModelContainerSpec', + ) + artifact_uri = proto.Field( + proto.STRING, + number=26, + ) + supported_deployment_resources_types = proto.RepeatedField( + proto.ENUM, + number=10, + enum=DeploymentResourcesType, + ) + supported_input_storage_formats = proto.RepeatedField( + proto.STRING, + number=11, + ) + supported_output_storage_formats = proto.RepeatedField( + proto.STRING, + number=12, + ) + create_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + 
message=timestamp_pb2.Timestamp, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=15, + message=deployed_model_ref.DeployedModelRef, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=23, + message=explanation.ExplanationSpec, + ) + etag = proto.Field( + proto.STRING, + number=16, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=17, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class PredictSchemata(proto.Message): + r"""Contains the schemata used in Model's predictions and explanations + via + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + and + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + + Attributes: + instance_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single instance, which + are used in + [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], + [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + and + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + parameters_schema_uri (str): + Immutable. 
Points to a YAML file stored on Google Cloud + Storage describing the parameters of prediction and + explanation via + [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], + [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] + and + [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has a read access. + prediction_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single prediction + produced by this Model, which are returned via + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], + [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], + and + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + """ + + instance_schema_uri = proto.Field( + proto.STRING, + number=1, + ) + parameters_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + prediction_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelContainerSpec(proto.Message): + r"""Specification of a container for serving predictions. 
Some fields in + this message correspond to fields in the `Kubernetes Container v1 + core + specification `__. + + Attributes: + image_uri (str): + Required. Immutable. URI of the Docker image to be used as + the custom container for serving predictions. This URI must + identify an image in Artifact Registry or Container + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the Vertex AI Service + Agent. + + The container image is ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], + stored internally, and this original path is afterwards not + used. + + To learn about the requirements for the Docker image itself, + see `Custom container + requirements `__. + + You can use the URI to one of Vertex AI's `pre-built + container images for + prediction `__ + in this field. + command (Sequence[str]): + Immutable. Specifies the command that runs when the + container starts. This overrides the container's + `ENTRYPOINT `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``ENTRYPOINT``'s "exec" form, not its + "shell" form. + + If you do not specify this field, then the container's + ``ENTRYPOINT`` runs, in conjunction with the + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] + field or the container's + ```CMD`` `__, + if either exists. If this field is not specified and the + container does not have an ``ENTRYPOINT``, then refer to the + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. + + If you specify this field, then you can also specify the + ``args`` field to provide additional arguments for this + command. However, if you specify this field, then the + container's ``CMD`` is ignored. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. 
+ + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``command`` field of the Kubernetes + Containers `v1 core + API `__. + args (Sequence[str]): + Immutable. Specifies arguments for the command that runs + when the container starts. This overrides the container's + ```CMD`` `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``CMD``'s "default parameters" form. + + If you don't specify this field but do specify the + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] + field, then the command from the ``command`` field runs + without any additional arguments. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + If you don't specify this field and don't specify the + ``command`` field, then the container's + ```ENTRYPOINT`` `__ + and ``CMD`` determine what runs based on their default + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. 
In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``args`` field of the Kubernetes + Containers `v1 core + API `__. + env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): + Immutable. List of environment variables to set in the + container. After the container starts running, code running + in the container can read these environment variables. + + Additionally, the + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] + and + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] + fields can reference these variables. Later entries in this + list can also reference earlier entries. For example, the + following example sets the variable ``VAR_2`` to have the + value ``foo bar``: + + .. code:: json + + [ + { + "name": "VAR_1", + "value": "foo" + }, + { + "name": "VAR_2", + "value": "$(VAR_1) bar" + } + ] + + If you switch the order of the variables in the example, + then the expansion does not occur. + + This field corresponds to the ``env`` field of the + Kubernetes Containers `v1 core + API `__. + ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]): + Immutable. List of ports to expose from the container. + Vertex AI sends any prediction requests that it receives to + the first port on this list. Vertex AI also sends `liveness + and health + checks `__ + to this port. + + If you do not specify this field, it defaults to following + value: + + .. code:: json + + [ + { + "containerPort": 8080 + } + ] + + Vertex AI does not use ports other than the first one + listed. 
This field corresponds to the ``ports`` field of the + Kubernetes Containers `v1 core + API `__. + predict_route (str): + Immutable. HTTP path on the container to send prediction + requests to. Vertex AI forwards requests sent using + [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + to this path on the container's IP address and port. Vertex + AI then returns the container's response in the API + response. + + For example, if you set this field to ``/foo``, then when + Vertex AI receives a prediction request, it forwards the + request body in a POST request to the ``/foo`` path on the + port of your container specified by the first value of this + ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] + field. + + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + health_route (str): + Immutable. HTTP path on the container to send health checks + to. Vertex AI intermittently sends GET requests to this path + on the container's IP address and port to check that the + container is healthy. Read more about `health + checks `__. 
+ + For example, if you set this field to ``/bar``, then Vertex + AI intermittently sends a GET request to the ``/bar`` path + on the port of your container specified by the first value + of this ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] + field. + + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``)of + the Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + ports = proto.RepeatedField( + proto.MESSAGE, + number=5, + message='Port', + ) + predict_route = proto.Field( + proto.STRING, + number=6, + ) + health_route = proto.Field( + proto.STRING, + number=7, + ) + + +class Port(proto.Message): + r"""Represents a network port in a container. + + Attributes: + container_port (int): + The number of the port to expose on the pod's + IP address. Must be a valid port number, between + 1 and 65535 inclusive. 
+ """ + + container_port = proto.Field( + proto.INT32, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py new file mode 100644 index 0000000000..e373125298 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -0,0 +1,490 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelDeploymentMonitoringObjectiveType', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + }, +) + + +class ModelDeploymentMonitoringObjectiveType(proto.Enum): + r"""The Model Monitoring Objective types.""" + MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 + RAW_FEATURE_SKEW = 1 + RAW_FEATURE_DRIFT = 2 + FEATURE_ATTRIBUTION_SKEW = 3 + FEATURE_ATTRIBUTION_DRIFT = 4 + + +class ModelDeploymentMonitoringJob(proto.Message): + r"""Represents a job that runs periodically to monitor the + deployed models in an endpoint. It will analyze the logged + training & prediction data to detect any abnormal behaviors. + + Attributes: + name (str): + Output only. Resource name of a + ModelDeploymentMonitoringJob. + display_name (str): + Required. The user-defined name of the + ModelDeploymentMonitoringJob. The name can be up + to 128 characters long and can be consist of any + UTF-8 characters. + Display name of a ModelDeploymentMonitoringJob. + endpoint (str): + Required. Endpoint resource name. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The detailed state of the + monitoring job. When the job is still creating, + the state will be 'PENDING'. Once the job is + successfully created, the state will be + 'RUNNING'. Pause the job, the state will be + 'PAUSED'. + Resume the job, the state will return to + 'RUNNING'. + schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): + Output only. Schedule state when the + monitoring job is in Running state. + latest_monitoring_pipeline_metadata (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LatestMonitoringPipelineMetadata): + Output only. Latest triggered monitoring + pipeline metadata. + model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]): + Required. The config for monitoring + objectives. This is a per DeployedModel config. + Each DeployedModel needs to be configured + separately. + model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringScheduleConfig): + Required. Schedule config for running the + monitoring job. + logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): + Required. Sample Strategy for logging. + model_monitoring_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig): + Alert config for model monitoring. + predict_instance_schema_uri (str): + YAML schema file uri describing the format of + a single instance, which are given to format + this Endpoint's prediction (and explanation). If + not set, we will generate predict schema from + collected predict requests. 
+ sample_predict_instance (google.protobuf.struct_pb2.Value): + Sample Predict instance, same format as + [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], + this can be set as a replacement of + [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. + If not set, we will generate predict schema from collected + predict requests. + analysis_instance_schema_uri (str): + YAML schema file uri describing the format of a single + instance that you want Tensorflow Data Validation (TFDV) to + analyze. + + If this field is empty, all the feature data types are + inferred from + [predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], + meaning that TFDV will use the data in the exact format(data + type) as prediction request/response. If there are any data + type differences between predict instance and TFDV instance, + this field can be used to override the schema. For models + trained with Vertex AI, this field must be set as all the + fields in predict instance formatted as string. + bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]): + Output only. The created bigquery tables for + the job under customer project. Customer could + do their own query & analysis. There could be 4 + log tables in maximum: + 1. Training data logging predict + request/response 2. Serving data logging predict + request/response + log_ttl (google.protobuf.duration_pb2.Duration): + The TTL of BigQuery tables in user projects + which stores logs. A day is the basic unit of + the TTL and we take the ceil of TTL/86400(a + day). e.g. { second: 3600} indicates ttl = 1 + day. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your ModelDeploymentMonitoringJob. 
+ + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was updated most + recently. + next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this monitoring + pipeline will be scheduled to run for the next + round. + stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Stats anomalies base folder path. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + ModelDeploymentMonitoringJob. If set, this + ModelDeploymentMonitoringJob and all + sub-resources of this + ModelDeploymentMonitoringJob will be secured by + this key. + enable_monitoring_pipeline_logs (bool): + If true, the scheduled monitoring pipeline logs are sent to + Google Cloud Logging, including pipeline status and + anomalies detected. Please note the logs incur cost, which + are subject to `Cloud Logging + pricing `__. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. + """ + class MonitoringScheduleState(proto.Enum): + r"""The state to Specify the monitoring pipeline.""" + MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 + PENDING = 1 + OFFLINE = 2 + RUNNING = 3 + + class LatestMonitoringPipelineMetadata(proto.Message): + r"""All metadata of most recent monitoring pipelines. 
+ + Attributes: + run_time (google.protobuf.timestamp_pb2.Timestamp): + The time that most recent monitoring + pipelines that is related to this run. + status (google.rpc.status_pb2.Status): + The status of the most recent monitoring + pipeline. + """ + + run_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + endpoint = proto.Field( + proto.STRING, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=4, + enum=job_state.JobState, + ) + schedule_state = proto.Field( + proto.ENUM, + number=5, + enum=MonitoringScheduleState, + ) + latest_monitoring_pipeline_metadata = proto.Field( + proto.MESSAGE, + number=25, + message=LatestMonitoringPipelineMetadata, + ) + model_deployment_monitoring_objective_configs = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='ModelDeploymentMonitoringObjectiveConfig', + ) + model_deployment_monitoring_schedule_config = proto.Field( + proto.MESSAGE, + number=7, + message='ModelDeploymentMonitoringScheduleConfig', + ) + logging_sampling_strategy = proto.Field( + proto.MESSAGE, + number=8, + message=model_monitoring.SamplingStrategy, + ) + model_monitoring_alert_config = proto.Field( + proto.MESSAGE, + number=15, + message=model_monitoring.ModelMonitoringAlertConfig, + ) + predict_instance_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + sample_predict_instance = proto.Field( + proto.MESSAGE, + number=19, + message=struct_pb2.Value, + ) + analysis_instance_schema_uri = proto.Field( + proto.STRING, + number=16, + ) + bigquery_tables = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='ModelDeploymentMonitoringBigQueryTable', + ) + log_ttl = proto.Field( + proto.MESSAGE, + number=17, + message=duration_pb2.Duration, + ) + labels = proto.MapField( + proto.STRING, + 
proto.STRING, + number=11, + ) + create_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + next_schedule_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + stats_anomalies_base_directory = proto.Field( + proto.MESSAGE, + number=20, + message=io.GcsDestination, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=21, + message=gca_encryption_spec.EncryptionSpec, + ) + enable_monitoring_pipeline_logs = proto.Field( + proto.BOOL, + number=22, + ) + error = proto.Field( + proto.MESSAGE, + number=23, + message=status_pb2.Status, + ) + + +class ModelDeploymentMonitoringBigQueryTable(proto.Message): + r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery + table name as well as some information of the logs stored in + this table. + + Attributes: + log_source (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): + The source of log. + log_type (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogType): + The type of log. + bigquery_table_path (str): + The created BigQuery table to store logs. Customer could do + their own query & analysis. 
Format: + ``bq://.model_deployment_monitoring_._`` + """ + class LogSource(proto.Enum): + r"""Indicates where does the log come from.""" + LOG_SOURCE_UNSPECIFIED = 0 + TRAINING = 1 + SERVING = 2 + + class LogType(proto.Enum): + r"""Indicates what type of traffic does the log belong to.""" + LOG_TYPE_UNSPECIFIED = 0 + PREDICT = 1 + EXPLAIN = 2 + + log_source = proto.Field( + proto.ENUM, + number=1, + enum=LogSource, + ) + log_type = proto.Field( + proto.ENUM, + number=2, + enum=LogType, + ) + bigquery_table_path = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelDeploymentMonitoringObjectiveConfig(proto.Message): + r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of + deployed_model_id to ModelMonitoringObjectiveConfig. + + Attributes: + deployed_model_id (str): + The DeployedModel ID of the objective config. + objective_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig): + The objective config of for the + modelmonitoring job of this deployed model. + """ + + deployed_model_id = proto.Field( + proto.STRING, + number=1, + ) + objective_config = proto.Field( + proto.MESSAGE, + number=2, + message=model_monitoring.ModelMonitoringObjectiveConfig, + ) + + +class ModelDeploymentMonitoringScheduleConfig(proto.Message): + r"""The config for scheduling monitoring job. + + Attributes: + monitor_interval (google.protobuf.duration_pb2.Duration): + Required. The model monitoring job scheduling + interval. It will be rounded up to next full + hour. This defines how often the monitoring jobs + are triggered. + monitor_window (google.protobuf.duration_pb2.Duration): + The time window of the prediction data being included in + each prediction dataset. This window specifies how long the + data should be collected from historical model results for + each run. 
If not set, + [ModelDeploymentMonitoringScheduleConfig.monitor_interval][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringScheduleConfig.monitor_interval] + will be used. e.g. If currently the cutoff time is + 2022-01-08 14:30:00 and the monitor_window is set to be + 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 + 14:30:00 will be retrieved and aggregated to calculate the + monitoring statistics. + """ + + monitor_interval = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + monitor_window = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class ModelMonitoringStatsAnomalies(proto.Message): + r"""Statistics and anomalies generated by Model Monitoring. + + Attributes: + objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): + Model Monitoring Objective those stats and + anomalies belonging to. + deployed_model_id (str): + Deployed Model ID. + anomaly_count (int): + Number of anomalies within all stats. + feature_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): + A list of historical Stats and Anomalies + generated for all Features. + """ + + class FeatureHistoricStatsAnomalies(proto.Message): + r"""Historical Stats (and Anomalies) for a specific Feature. + + Attributes: + feature_display_name (str): + Display Name of the Feature. + threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): + Threshold for anomaly detection. + training_stats (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly): + Stats calculated for the Training Dataset. + prediction_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): + A list of historical stats generated by + different time window's Prediction Dataset. 
+ """ + + feature_display_name = proto.Field( + proto.STRING, + number=1, + ) + threshold = proto.Field( + proto.MESSAGE, + number=3, + message=model_monitoring.ThresholdConfig, + ) + training_stats = proto.Field( + proto.MESSAGE, + number=4, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + prediction_stats = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + objective = proto.Field( + proto.ENUM, + number=1, + enum='ModelDeploymentMonitoringObjectiveType', + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + anomaly_count = proto.Field( + proto.INT32, + number=3, + ) + feature_stats = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=FeatureHistoricStatsAnomalies, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py new file mode 100644 index 0000000000..1e27712f03 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelEvaluation', + }, +) + + +class ModelEvaluation(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on all of the test data against annotations from the + test data. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluation. + display_name (str): + The display name of the ModelEvaluation. + metrics_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing the + [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] + of this ModelEvaluation. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + metrics (google.protobuf.struct_pb2.Value): + Evaluation metrics of the Model. The schema of the metrics + is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluation was created. + slice_dimensions (Sequence[str]): + All possible + [dimensions][ModelEvaluationSlice.slice.dimension] of + ModelEvaluationSlices. The dimensions can be used as the + filter of the + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] + request, in the form of ``slice.dimension = ``. + model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation): + Aggregated explanation metrics for the + Model's prediction output over the data this + ModelEvaluation uses. This field is populated + only if the Model is evaluated with + explanations, and only for AutoML tabular + Models. 
+ explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): + Describes the values of + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + that are used for explaining the predicted values on the + evaluated data. + metadata (google.protobuf.struct_pb2.Value): + The metadata of the ModelEvaluation. For the ModelEvaluation + uploaded from Managed Pipeline, metadata contains a + structured value with keys of "pipeline_job_id", + "evaluation_dataset_type", "evaluation_dataset_path". + """ + + class ModelEvaluationExplanationSpec(proto.Message): + r""" + + Attributes: + explanation_type (str): + Explanation type. + + For AutoML Image Classification models, possible values are: + + - ``image-integrated-gradients`` + - ``image-xrai`` + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Explanation spec details. + """ + + explanation_type = proto.Field( + proto.STRING, + number=1, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=2, + message=explanation.ExplanationSpec, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=10, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + metrics = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + slice_dimensions = proto.RepeatedField( + proto.STRING, + number=5, + ) + model_explanation = proto.Field( + proto.MESSAGE, + number=8, + message=explanation.ModelExplanation, + ) + explanation_specs = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=ModelEvaluationExplanationSpec, + ) + metadata = proto.Field( + proto.MESSAGE, + number=11, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py new file mode 100644 index 0000000000..48569f9e98 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelEvaluationSlice', + }, +) + + +class ModelEvaluationSlice(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on a slice of the test data against ground truth + annotations. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluationSlice. + slice_ (google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice.Slice): + Output only. The slice of the test data that + is used to evaluate the Model. + metrics_schema_uri (str): + Output only. Points to a YAML file stored on Google Cloud + Storage describing the + [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] + of this ModelEvaluationSlice. The schema is defined as an + OpenAPI 3.0.2 `Schema + Object `__. 
+ metrics (google.protobuf.struct_pb2.Value): + Output only. Sliced evaluation metrics of the Model. The + schema of the metrics is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri] + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluationSlice was created. + """ + + class Slice(proto.Message): + r"""Definition of a slice. + + Attributes: + dimension (str): + Output only. The dimension of the slice. Well-known + dimensions are: + + - ``annotationSpec``: This slice is on the test data that + has either ground truth or prediction with + [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] + equals to + [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. + value (str): + Output only. The value of the dimension in + this slice. + """ + + dimension = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + slice_ = proto.Field( + proto.MESSAGE, + number=2, + message=Slice, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + metrics = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py new file mode 100644 index 0000000000..638907875a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import io + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelMonitoringObjectiveConfig', + 'ModelMonitoringAlertConfig', + 'ThresholdConfig', + 'SamplingStrategy', + }, +) + + +class ModelMonitoringObjectiveConfig(proto.Message): + r"""Next ID: 8 + + Attributes: + training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): + Training dataset for models. This field has + to be set only if + TrainingPredictionSkewDetectionConfig is + specified. + training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): + The config for skew between training data and + prediction data. + prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): + The config for drift of prediction data. + explanation_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): + The config for integrating with Vertex + Explainable AI. + """ + + class TrainingDataset(proto.Message): + r"""Training Dataset information. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + dataset (str): + The resource name of the Dataset used to + train this Model. + + This field is a member of `oneof`_ ``data_source``. + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Google Cloud Storage uri of the unmanaged + Dataset used to train this Model. + + This field is a member of `oneof`_ ``data_source``. + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + The BigQuery table of the unmanaged Dataset + used to train this Model. + + This field is a member of `oneof`_ ``data_source``. + data_format (str): + Data format of the dataset, only applicable + if the input is from Google Cloud Storage. + The possible formats are: + + "tf-record" + The source file is a TFRecord file. + + "csv" + The source file is a CSV file. + "jsonl" + The source file is a JSONL file. + target_field (str): + The target field name the model is to + predict. This field will be excluded when doing + Predict and (or) Explain for the training data. + logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): + Strategy to sample data from Training + Dataset. If not set, we process the whole + dataset. + """ + + dataset = proto.Field( + proto.STRING, + number=3, + oneof='data_source', + ) + gcs_source = proto.Field( + proto.MESSAGE, + number=4, + oneof='data_source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=5, + oneof='data_source', + message=io.BigQuerySource, + ) + data_format = proto.Field( + proto.STRING, + number=2, + ) + target_field = proto.Field( + proto.STRING, + number=6, + ) + logging_sampling_strategy = proto.Field( + proto.MESSAGE, + number=7, + message='SamplingStrategy', + ) + + class TrainingPredictionSkewDetectionConfig(proto.Message): + r"""The config for Training & Prediction data skew detection. 
It + specifies the training dataset sources and the skew detection + parameters. + + Attributes: + skew_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for skew, a value threshold must be configured + for that feature. The threshold here is against + feature distribution distance between the + training and prediction feature. + attribution_score_skew_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. The threshold here is against + attribution score distance between the training + and prediction feature. + default_skew_threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): + Skew anomaly detection threshold used by all + features. When the per-feature thresholds are + not set, this field can be used to specify a + threshold for all features. + """ + + skew_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='ThresholdConfig', + ) + attribution_score_skew_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message='ThresholdConfig', + ) + default_skew_threshold = proto.Field( + proto.MESSAGE, + number=6, + message='ThresholdConfig', + ) + + class PredictionDriftDetectionConfig(proto.Message): + r"""The config for Prediction data drift detection. + + Attributes: + drift_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. If a feature needs to be monitored + for drift, a value threshold must be configured + for that feature. The threshold here is against + feature distribution distance between different + time windows. + attribution_score_drift_thresholds (Mapping[str, google.cloud.aiplatform_v1beta1.types.ThresholdConfig]): + Key is the feature name and value is the + threshold. 
The threshold here is against + attribution score distance between different + time windows. + default_drift_threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): + Drift anomaly detection threshold used by all + features. When the per-feature thresholds are + not set, this field can be used to specify a + threshold for all features. + """ + + drift_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message='ThresholdConfig', + ) + attribution_score_drift_thresholds = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message='ThresholdConfig', + ) + default_drift_threshold = proto.Field( + proto.MESSAGE, + number=5, + message='ThresholdConfig', + ) + + class ExplanationConfig(proto.Message): + r"""The config for integrating with Vertex Explainable AI. Only + applicable if the Model has explanation_spec populated. + + Attributes: + enable_feature_attributes (bool): + If want to analyze the Vertex Explainable AI + feature attribute scores or not. If set to true, + Vertex AI will log the feature attributions from + explain response and do the skew/drift detection + for them. + explanation_baseline (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): + Predictions generated by the + BatchPredictionJob using baseline dataset. + """ + + class ExplanationBaseline(proto.Message): + r"""Output from + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] + for Model Monitoring baseline dataset, which can be used to generate + baseline attribution scores. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Cloud Storage location for BatchExplain + output. + + This field is a member of `oneof`_ ``destination``. + bigquery (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + BigQuery location for BatchExplain output. + + This field is a member of `oneof`_ ``destination``. + prediction_format (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat): + The storage format of the predictions + generated BatchPrediction job. + """ + class PredictionFormat(proto.Enum): + r"""The storage format of the predictions generated + BatchPrediction job. + """ + PREDICTION_FORMAT_UNSPECIFIED = 0 + JSONL = 2 + BIGQUERY = 3 + + gcs = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.GcsDestination, + ) + bigquery = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.BigQueryDestination, + ) + prediction_format = proto.Field( + proto.ENUM, + number=1, + enum='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat', + ) + + enable_feature_attributes = proto.Field( + proto.BOOL, + number=1, + ) + explanation_baseline = proto.Field( + proto.MESSAGE, + number=2, + message='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline', + ) + + training_dataset = proto.Field( + proto.MESSAGE, + number=1, + message=TrainingDataset, + ) + training_prediction_skew_detection_config = proto.Field( + proto.MESSAGE, + number=2, + message=TrainingPredictionSkewDetectionConfig, + ) + prediction_drift_detection_config = proto.Field( + proto.MESSAGE, + number=3, + message=PredictionDriftDetectionConfig, + ) + explanation_config = proto.Field( + proto.MESSAGE, + number=5, + message=ExplanationConfig, + ) + + +class 
ModelMonitoringAlertConfig(proto.Message): + r"""Next ID: 3 + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig): + Email alert config. + + This field is a member of `oneof`_ ``alert``. + enable_logging (bool): + Dump the anomalies to Cloud Logging. The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further sinked to Pub/Sub or any other services + supported by Cloud Logging. + """ + + class EmailAlertConfig(proto.Message): + r"""The config for email alert. + + Attributes: + user_emails (Sequence[str]): + The email addresses to send the alert. + """ + + user_emails = proto.RepeatedField( + proto.STRING, + number=1, + ) + + email_alert_config = proto.Field( + proto.MESSAGE, + number=1, + oneof='alert', + message=EmailAlertConfig, + ) + enable_logging = proto.Field( + proto.BOOL, + number=2, + ) + + +class ThresholdConfig(proto.Message): + r"""The config for feature monitoring threshold. + Next ID: 3 + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + value (float): + Specify a threshold value that can trigger + the alert. If this threshold config is for + feature distribution distance: 1. For + categorical feature, the distribution distance + is calculated by L-infinity norm. + 2. For numerical feature, the distribution + distance is calculated by Jensen–Shannon + divergence. + Each feature must have a non-zero threshold if + they need to be monitored. Otherwise no alert + will be triggered for that feature. + + This field is a member of `oneof`_ ``threshold``. 
+ """ + + value = proto.Field( + proto.DOUBLE, + number=1, + oneof='threshold', + ) + + +class SamplingStrategy(proto.Message): + r"""Sampling Strategy for logging, can be for both training and + prediction dataset. + Next ID: 2 + + Attributes: + random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig): + Random sample config. Will support more + sampling strategies later. + """ + + class RandomSampleConfig(proto.Message): + r"""Requests are randomly selected. + + Attributes: + sample_rate (float): + Sample rate (0, 1] + """ + + sample_rate = proto.Field( + proto.DOUBLE, + number=1, + ) + + random_sample_config = proto.Field( + proto.MESSAGE, + number=1, + message=RandomSampleConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py new file mode 100644 index 0000000000..288c7abaa1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -0,0 +1,853 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation as gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'UploadModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelResponse', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'ListModelVersionsRequest', + 'ListModelVersionsResponse', + 'UpdateModelRequest', + 'UpdateExplanationDatasetRequest', + 'UpdateExplanationDatasetOperationMetadata', + 'DeleteModelRequest', + 'DeleteModelVersionRequest', + 'MergeVersionAliasesRequest', + 'ExportModelRequest', + 'ExportModelOperationMetadata', + 'UpdateExplanationDatasetResponse', + 'ExportModelResponse', + 'ImportModelEvaluationRequest', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'GetModelEvaluationSliceRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + }, +) + + +class UploadModelRequest(proto.Message): + r"""Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + + Attributes: + parent (str): + Required. The resource name of the Location into which to + upload the Model. Format: + ``projects/{project}/locations/{location}`` + parent_model (str): + Optional. The resource name of the model into + which to upload the version. Only specify this + field when uploading a new version. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. 
+ + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + parent_model = proto.Field( + proto.STRING, + number=4, + ) + model_id = proto.Field( + proto.STRING, + number=5, + ) + model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model.Model, + ) + + +class UploadModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UploadModelResponse(proto.Message): + r"""Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + Attributes: + model (str): + The name of the uploaded Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_version_id (str): + Output only. The version ID of the model that + is uploaded. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + model_version_id = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + + Attributes: + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + In order to retrieve a specific version of the model, also + provide the version ID or version alias. 
Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the "default" + version will be returned. The "default" version alias is + created for the first version of the model, and can be moved + to other versions later on. There will be exactly one + default version. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1beta1.Model.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``model=1234`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] + of the previous + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): + List of Models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class ListModelVersionsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions]. + + Attributes: + name (str): + Required. The name of the model to list + versions for. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelVersionsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsResponse.next_page_token] + of the previous [ModelService.ListModelversions][] call. + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. 
+ + Some examples: + + - ``labels.myKey="myValue"`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelVersionsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelVersions][google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): + List of Model versions in the requested page. + In the returned Model name field, version ID + instead of regvision tag will be included. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListModelVersionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelVersionsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelRequest(proto.Message): + r"""Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + + Attributes: + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model which replaces the resource on the + server. When Model Versioning is enabled, the model.name + will be used to determine whether to update the model or + model version. + + 1. model.name with the @ value, e.g. models/123@1, refers to + a version specific update. + 2. model.name without the @ value, e.g. models/123, refers + to a model update. + 3. model.name with @-, e.g. models/123@-, refers to a model + update. + 4. 
Supported model fields: display_name, description; + supported version-specific fields: version_description. + Labels are supported in both scenarios. Both the model + labels and the version labels are merged when a model is + returned. When updating labels, if the request is for + model-specific update, model label gets updated. + Otherwise, version labels get updated. + 5. A model name or model version name fields update mismatch + will cause a precondition error. + 6. One request cannot update both the model and the version + fields. You must update them separately. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateExplanationDatasetRequest(proto.Message): + r"""Request message for + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset]. + + Attributes: + model (str): + Required. The resource name of the Model to update. Format: + ``projects/{project}/locations/{location}/models/{model}`` + examples (google.cloud.aiplatform_v1beta1.types.Examples): + The example config containing the location of + the dataset. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + examples = proto.Field( + proto.MESSAGE, + number=2, + message=explanation.Examples, + ) + + +class UpdateExplanationDatasetOperationMetadata(proto.Message): + r"""Runtime operation information for + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + + Attributes: + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeleteModelVersionRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModelVersion][google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion]. + + Attributes: + name (str): + Required. The name of the model version to be deleted, with + a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class MergeVersionAliasesRequest(proto.Message): + r"""Request message for + [ModelService.MergeVersionAliases][google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases]. + + Attributes: + name (str): + Required. The name of the model version to merge aliases, + with a version ID explicitly included. + + Example: + ``projects/{project}/locations/{location}/models/{model}@1234`` + version_aliases (Sequence[str]): + Required. The set of version aliases to merge. The alias + should be at most 128 characters, and match + ``[a-z][a-z0-9-]{0,126}[a-z-0-9]``. Add the ``-`` prefix to + an alias means removing that alias from the version. ``-`` + is NOT counted in the 128 characters. Example: ``-golden`` + means removing the ``golden`` alias from the version. + + There is NO ordering in aliases, which means + + 1) The aliases returned from GetModel API might not have the + exactly same order from this MergeVersionAliases API. 
2) + Adding and deleting the same alias in the request is not + recommended, and the 2 operations will be cancelled out. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + version_aliases = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + + Attributes: + name (str): + Required. The resource name of the Model to + export. The resource name may contain version id + or version alias to specify the version, if no + version is specified, the default version will + be exported. + output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): + Required. The desired output location and + configuration. + """ + + class OutputConfig(proto.Message): + r"""Output configuration for the Model export. + + Attributes: + export_format_id (str): + The ID of the format in which the Model must be exported. + Each Model lists the [export formats it + supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + If no value is provided here, then the first from the list + of the Model's supported formats is used by default. + artifact_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + The Cloud Storage location where the Model artifact is to be + written to. Under the directory given as the destination a + new one with name + "``model-export--``", + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format, will be created. Inside, the Model and any of its + supporting files will be written. This field should only be + set when the ``exportableContent`` field of the + [Model.supported_export_formats] object contains + ``ARTIFACT``. + image_destination (google.cloud.aiplatform_v1beta1.types.ContainerRegistryDestination): + The Google Container Registry or Artifact Registry uri where + the Model container image will be copied to. 
This field + should only be set when the ``exportableContent`` field of + the [Model.supported_export_formats] object contains + ``IMAGE``. + """ + + export_format_id = proto.Field( + proto.STRING, + number=1, + ) + artifact_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.GcsDestination, + ) + image_destination = proto.Field( + proto.MESSAGE, + number=4, + message=io.ContainerRegistryDestination, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + output_config = proto.Field( + proto.MESSAGE, + number=2, + message=OutputConfig, + ) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + output_info (google.cloud.aiplatform_v1beta1.types.ExportModelOperationMetadata.OutputInfo): + Output only. Information further describing + the output of this Model export. + """ + + class OutputInfo(proto.Message): + r"""Further describes the output of the ExportModel. Supplements + [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig]. + + Attributes: + artifact_output_uri (str): + Output only. If the Model artifact is being + exported to Google Cloud Storage this is the + full path of the directory created, into which + the Model files are being written to. + image_output_uri (str): + Output only. If the Model image is being + exported to Google Container Registry or + Artifact Registry this is the full path of the + image created. 
+ """ + + artifact_output_uri = proto.Field( + proto.STRING, + number=2, + ) + image_output_uri = proto.Field( + proto.STRING, + number=3, + ) + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + output_info = proto.Field( + proto.MESSAGE, + number=2, + message=OutputInfo, + ) + + +class UpdateExplanationDatasetResponse(proto.Message): + r"""Response message of + [ModelService.UpdateExplanationDataset][google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset] + operation. + + """ + + +class ExportModelResponse(proto.Message): + r"""Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + """ + + +class ImportModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + + Attributes: + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_evaluation (google.cloud.aiplatform_v1beta1.types.ModelEvaluation): + Required. Model evaluation resource to be + imported. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model_evaluation = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_evaluation.ModelEvaluation, + ) + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + + Attributes: + name (str): + Required. The name of the ModelEvaluation resource. 
Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Attributes: + parent (str): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Attributes: + model_evaluations (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + model_evaluations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_evaluation.ModelEvaluation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelEvaluationSliceRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + + Attributes: + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationSlicesRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Attributes: + parent (str): + Required. The resource name of the ModelEvaluation to list + the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + filter (str): + The standard list filter. + + - ``slice.dimension`` - for =. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationSlicesResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Attributes: + model_evaluation_slices (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation_slices = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_evaluation_slice.ModelEvaluationSlice, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py new file mode 100644 index 0000000000..24de748706 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'GenericOperationMetadata', + 'DeleteOperationMetadata', + }, +) + + +class GenericOperationMetadata(proto.Message): + r"""Generic Metadata shared by all operations. + + Attributes: + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + E.g. single files that couldn't be read. + This field should never exceed 20 entries. + Status details field will contain standard GCP + error details. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + updated for the last time. If the operation has + finished (successfully or not), this is the + finish time. + """ + + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message='GenericOperationMetadata', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_failure_policy.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_failure_policy.py new file mode 100644 index 0000000000..326a7b99cc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_failure_policy.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PipelineFailurePolicy', + }, +) + + +class PipelineFailurePolicy(proto.Enum): + r"""Represents the failure policy of a pipeline. Currently, the default + of a pipeline is that the pipeline will continue to run until no + more tasks can be executed, also known as + PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is set to + PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop scheduling any new + tasks when a task has failed. Any scheduled tasks will continue to + completion. 
+ """ + PIPELINE_FAILURE_POLICY_UNSPECIFIED = 0 + PIPELINE_FAILURE_POLICY_FAIL_SLOW = 1 + PIPELINE_FAILURE_POLICY_FAIL_FAST = 2 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py new file mode 100644 index 0000000000..943ab2867f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -0,0 +1,574 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import value as gca_value +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PipelineJob', + 'PipelineTemplateMetadata', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + }, +) + + +class PipelineJob(proto.Message): + r"""An instance of a machine learning PipelineJob. + + Attributes: + name (str): + Output only. The resource name of the + PipelineJob. + display_name (str): + The display name of the Pipeline. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline creation time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline end time. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this PipelineJob + was most recently updated. + pipeline_spec (google.protobuf.struct_pb2.Struct): + The spec of the pipeline. + state (google.cloud.aiplatform_v1beta1.types.PipelineState): + Output only. The detailed state of the job. + job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): + Output only. The details of pipeline run. Not + available in the list view. 
+ error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + pipeline execution. Only populated when the + pipeline's state is FAILED or CANCELLED. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize PipelineJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig): + Runtime config of the pipeline. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + pipelineJob. If set, this PipelineJob and all of + its sub-resources will be secured by this key. + service_account (str): + The service account that the pipeline workload runs as. If + not specified, the Compute Engine default service account in + the project will be used. See + https://cloud.google.com/compute/docs/access/service-accounts#default_service_account + + Users starting the pipeline must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + network (str): + The full name of the Compute Engine + `network `__ + to which the Pipeline Job's workload should be peered. For + example, ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + Private services access must already be configured for the + network. Pipeline job will apply the network configuration + to the GCP resources being launched, if applied, such as + Vertex AI Training or Dataflow job. If left unspecified, the + workload is not peered with any network. 
+ template_uri (str): + A template uri from where the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec], + if empty, will be downloaded. + template_metadata (google.cloud.aiplatform_v1beta1.types.PipelineTemplateMetadata): + Output only. Pipeline template metadata. Will fill up fields + if + [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] + is from supported template registry. + """ + + class RuntimeConfig(proto.Message): + r"""The runtime config of a PipelineJob. + + Attributes: + parameters (Mapping[str, google.cloud.aiplatform_v1beta1.types.Value]): + Deprecated. Use + [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] + instead. The runtime parameters of the PipelineJob. The + parameters will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, + such as pipelines built using Kubeflow Pipelines SDK 1.8 or + lower. + gcs_output_directory (str): + Required. A path in a Cloud Storage bucket, which will be + treated as the root output directory of the pipeline. It is + used by the system to generate the paths of output + artifacts. The artifact paths are generated with a sub-path + pattern ``{job_id}/{task_id}/{output_key}`` under the + specified output directory. The service account specified in + this pipeline must have the ``storage.objects.get`` and + ``storage.objects.create`` permissions for this bucket. + parameter_values (Mapping[str, google.protobuf.struct_pb2.Value]): + The runtime parameters of the PipelineJob. The parameters + will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. 
This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as + pipelines built using Kubeflow Pipelines SDK 1.9 or higher + and the v2 DSL. + failure_policy (google.cloud.aiplatform_v1beta1.types.PipelineFailurePolicy): + Represents the failure policy of a pipeline. Currently, the + default of a pipeline is that the pipeline will continue to + run until no more tasks can be executed, also known as + PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a pipeline is + set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop + scheduling any new tasks when a task has failed. Any + scheduled tasks will continue to completion. + """ + + parameters = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=gca_value.Value, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + parameter_values = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + failure_policy = proto.Field( + proto.ENUM, + number=4, + enum=pipeline_failure_policy.PipelineFailurePolicy, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + pipeline_spec = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=pipeline_state.PipelineState, + ) + job_detail = proto.Field( + proto.MESSAGE, + number=9, + message='PipelineJobDetail', + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + 
proto.STRING, + number=11, + ) + runtime_config = proto.Field( + proto.MESSAGE, + number=12, + message=RuntimeConfig, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=16, + message=gca_encryption_spec.EncryptionSpec, + ) + service_account = proto.Field( + proto.STRING, + number=17, + ) + network = proto.Field( + proto.STRING, + number=18, + ) + template_uri = proto.Field( + proto.STRING, + number=19, + ) + template_metadata = proto.Field( + proto.MESSAGE, + number=20, + message='PipelineTemplateMetadata', + ) + + +class PipelineTemplateMetadata(proto.Message): + r"""Pipeline template metadata if + [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] + is from supported template registry. Currently, the only supported + registry is Artifact Registry. + + Attributes: + version (str): + The version_name in artifact registry. + + Will always be presented in output if the + [PipelineJob.template_uri][google.cloud.aiplatform.v1beta1.PipelineJob.template_uri] + is from supported template registry. + + Format is "sha256:abcdef123456...". + """ + + version = proto.Field( + proto.STRING, + number=3, + ) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + + Attributes: + pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. 
+ """ + + pipeline_context = proto.Field( + proto.MESSAGE, + number=1, + message=context.Context, + ) + pipeline_run_context = proto.Field( + proto.MESSAGE, + number=2, + message=context.Context, + ) + task_details = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='PipelineTaskDetail', + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. + pipeline_task_status (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.PipelineTaskStatus]): + Output only. A list of task status. This + field keeps a record of task status evolving + over time. + inputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.ArtifactList]): + Output only. The runtime input artifacts of + the task. 
+ outputs (Mapping[str, google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.ArtifactList]): + Output only. The runtime output artifacts of + the task. + """ + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class PipelineTaskStatus(proto.Message): + r"""A single record of the task status. + + Attributes: + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Update time of this status. + state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): + Output only. The state of the task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + the state. May be set when the state is any of + the non-final state (PENDING/RUNNING/CANCELLING) + or FAILED state. If the state is FAILED, the + error here is final and not going to be retried. + If the state is a non-final state, the error + indicates a system-error being retried. + """ + + update_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=2, + enum='PipelineTaskDetail.State', + ) + error = proto.Field( + proto.MESSAGE, + number=3, + message=status_pb2.Status, + ) + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + Output only. A list of artifact metadata. 
+ """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + + task_id = proto.Field( + proto.INT64, + number=1, + ) + parent_task_id = proto.Field( + proto.INT64, + number=12, + ) + task_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + executor_detail = proto.Field( + proto.MESSAGE, + number=6, + message='PipelineTaskExecutorDetail', + ) + state = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + execution = proto.Field( + proto.MESSAGE, + number=8, + message=gca_execution.Execution, + ) + error = proto.Field( + proto.MESSAGE, + number=9, + message=status_pb2.Status, + ) + pipeline_task_status = proto.RepeatedField( + proto.MESSAGE, + number=13, + message=PipelineTaskStatus, + ) + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=10, + message=ArtifactList, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=11, + message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + + This field is a member of `oneof`_ ``details``. 
+ custom_job_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. + + This field is a member of `oneof`_ ``details``. + """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field( + proto.STRING, + number=1, + ) + pre_caching_check_job = proto.Field( + proto.STRING, + number=2, + ) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob]. 
+ """ + + job = proto.Field( + proto.STRING, + number=1, + ) + + container_detail = proto.Field( + proto.MESSAGE, + number=1, + oneof='details', + message=ContainerDetail, + ) + custom_job_detail = proto.Field( + proto.MESSAGE, + number=2, + oneof='details', + message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py new file mode 100644 index 0000000000..f3cf45a77b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateTrainingPipelineRequest', + 'GetTrainingPipelineRequest', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'DeleteTrainingPipelineRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'GetPipelineJobRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'DeletePipelineJobRequest', + 'CancelPipelineJobRequest', + }, +) + + +class CreateTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): + Required. The TrainingPipeline to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + training_pipeline = proto.Field( + proto.MESSAGE, + number=2, + message=gca_training_pipeline.TrainingPipeline, + ) + + +class GetTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource. 
Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrainingPipelinesRequest(proto.Message): + r"""Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + + - ``NOT display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] + of the previous + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListTrainingPipelinesResponse(proto.Message): + r"""Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Attributes: + training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]): + List of TrainingPipelines in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + training_pipelines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_training_pipeline.TrainingPipeline, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource to be + deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline to cancel. 
+ Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + pipeline_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the PipelineJobs that match the filter expression. 
The + following fields are supported: + + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``display_name``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``pipeline_job_user_id``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. for example, can check + if pipeline's display_name contains *step* by doing + display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. + - ``template_uri``: Supports ``=``, ``!=`` comparisons, and + ``:`` wildcard. + - ``template_metadata.version``: Supports ``=``, ``!=`` + comparisons, and ``:`` wildcard. + + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + call. + order_by (str): + A comma-separated list of fields to order by. 
The default + sort order is in ascending order. Use "desc" after a field + name for descending. You can have multiple order_by fields + provided e.g. "create_time desc, end_time", "end_time, + start_time, update_time" For example, using "create_time + desc, end_time" will order results by create time in + descending order, and if there are multiple jobs having the + same create time, order them by the end time in ascending + order. if order_by is not specified, it will order by + default order is create time in descending order. Supported + fields: + + - ``create_time`` + - ``update_time`` + - ``end_time`` + - ``start_time`` + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_pipeline_job.PipelineJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py new file mode 100644 index 0000000000..4cde39962d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PipelineState', + }, +) + + +class PipelineState(proto.Enum): + r"""Describes the state of a pipeline.""" + PIPELINE_STATE_UNSPECIFIED = 0 + PIPELINE_STATE_QUEUED = 1 + PIPELINE_STATE_PENDING = 2 + PIPELINE_STATE_RUNNING = 3 + PIPELINE_STATE_SUCCEEDED = 4 + PIPELINE_STATE_FAILED = 5 + PIPELINE_STATE_CANCELLING = 6 + PIPELINE_STATE_CANCELLED = 7 + PIPELINE_STATE_PAUSED = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py new file mode 100644 index 0000000000..baa8bf4774 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -0,0 +1,284 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import explanation +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PredictRequest', + 'PredictResponse', + 'RawPredictRequest', + 'ExplainRequest', + 'ExplainResponse', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the prediction + call. A DeployedModel may have an upper limit on the number + of instances it supports per request, and when it is + exceeded the prediction call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + Attributes: + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + The schema of any single prediction may be specified via + Endpoint's DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this prediction. + model (str): + Output only. The resource name of the Model + which is deployed as the DeployedModel that this + prediction hits. + model_version_id (str): + Output only. The version ID of the Model + which is deployed as the DeployedModel that this + prediction hits. + model_display_name (str): + Output only. The [display + name][google.cloud.aiplatform.v1beta1.Model.display_name] of + the Model which is deployed as the DeployedModel that this + prediction hits. + """ + + predictions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + model_version_id = proto.Field( + proto.STRING, + number=5, + ) + model_display_name = proto.Field( + proto.STRING, + number=4, + ) + + +class RawPredictRequest(proto.Message): + r"""Request message for + [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. 
+ + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + http_body (google.api.httpbody_pb2.HttpBody): + The prediction input. Supports HTTP headers and arbitrary + data payload. + + A + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + may have an upper limit on the number of instances it + supports per request. When this limit it is exceeded for an + AutoML model, the + [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] + method returns an error. When this limit is exceeded for a + custom-trained model, the behavior varies depending on the + model. + + You can specify the schema for each instance in the + [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + field when you create a + [Model][google.cloud.aiplatform.v1beta1.Model]. This schema + applies when you deploy the ``Model`` as a ``DeployedModel`` + to an [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] + and use the ``RawPredict`` method. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + http_body = proto.Field( + proto.MESSAGE, + number=2, + message=httpbody_pb2.HttpBody, + ) + + +class ExplainRequest(proto.Message): + r"""Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. 
A DeployedModel may have an upper limit on + the number of instances it supports per request, and when it + is exceeded the explanation call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + explanation_spec_override (google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride): + If specified, overrides the + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of the DeployedModel. Can be used for explaining prediction + results with different configurations, such as: + + - Explaining top-5 predictions results as opposed to top-1; + - Increasing path count or step count of the attribution + methods to reduce approximate errors; + - Using different baselines for explaining the prediction + results. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + explanation_spec_override = proto.Field( + proto.MESSAGE, + number=5, + message=explanation.ExplanationSpecOverride, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class ExplainResponse(proto.Message): + r"""Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + Attributes: + explanations (Sequence[google.cloud.aiplatform_v1beta1.types.Explanation]): + The explanations of the Model's + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. + + It has the same number of elements as + [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + to be explained. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this explanation. + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + Same as + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. 
+ """ + + explanations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=explanation.Explanation, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + predictions = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py new file mode 100644 index 0000000000..0a5a6d35c1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'SpecialistPool', + }, +) + + +class SpecialistPool(proto.Message): + r"""SpecialistPool represents customers' own workforce to work on + their data labeling jobs. It includes a group of specialist + managers and workers. Managers are responsible for managing the + workers in this pool as well as customers' data labeling jobs + associated with this pool. Customers create specialist pool as + well as start data labeling jobs on Cloud, managers and workers + handle the jobs using CrowdCompute console. + + Attributes: + name (str): + Required. The resource name of the + SpecialistPool. 
+ display_name (str): + Required. The user-defined name of the + SpecialistPool. The name can be up to 128 + characters long and can be consist of any UTF-8 + characters. + This field should be unique on project-level. + specialist_managers_count (int): + Output only. The number of managers in this + SpecialistPool. + specialist_manager_emails (Sequence[str]): + The email addresses of the managers in the + SpecialistPool. + pending_data_labeling_jobs (Sequence[str]): + Output only. The resource name of the pending + data labeling jobs. + specialist_worker_emails (Sequence[str]): + The email addresses of workers in the + SpecialistPool. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + specialist_managers_count = proto.Field( + proto.INT32, + number=3, + ) + specialist_manager_emails = proto.RepeatedField( + proto.STRING, + number=4, + ) + pending_data_labeling_jobs = proto.RepeatedField( + proto.STRING, + number=5, + ) + specialist_worker_emails = proto.RepeatedField( + proto.STRING, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py new file mode 100644 index 0000000000..556e8063b6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py @@ -0,0 +1,237 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore

from google.cloud.aiplatform_v1beta1.types import operation
from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool
from google.protobuf import field_mask_pb2  # type: ignore


__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1beta1',
    manifest={
        'CreateSpecialistPoolRequest',
        'CreateSpecialistPoolOperationMetadata',
        'GetSpecialistPoolRequest',
        'ListSpecialistPoolsRequest',
        'ListSpecialistPoolsResponse',
        'DeleteSpecialistPoolRequest',
        'UpdateSpecialistPoolRequest',
        'UpdateSpecialistPoolOperationMetadata',
    },
)


class CreateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        parent (str):
            Required. The parent Project for the new SpecialistPool, in
            the form ``projects/{project}/locations/{location}``.
        specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
            Required. The SpecialistPool to create.
    """

    parent = proto.Field(proto.STRING, number=1)
    specialist_pool = proto.Field(
        proto.MESSAGE, number=2, message=gca_specialist_pool.SpecialistPool)


class CreateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation information for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
            The operation generic information.
    """

    generic_metadata = proto.Field(
        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata)


class GetSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].

    Attributes:
        name (str):
            Required. The name of the SpecialistPool resource, in the
            form
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
    """

    name = proto.Field(proto.STRING, number=1)


class ListSpecialistPoolsRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        parent (str):
            Required. The name of the SpecialistPool's parent resource.
            Format: ``projects/{project}/locations/{location}``
        page_size (int):
            The standard list page size.
        page_token (str):
            The standard list page token. Typically obtained from
            [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token]
            of the previous
            [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]
            call. Returns the first page if empty.
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            Mask specifying which fields to read.
    """

    parent = proto.Field(proto.STRING, number=1)
    page_size = proto.Field(proto.INT32, number=2)
    page_token = proto.Field(proto.STRING, number=3)
    read_mask = proto.Field(
        proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask)


class ListSpecialistPoolsResponse(proto.Message):
    r"""Response message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        specialist_pools (Sequence[google.cloud.aiplatform_v1beta1.types.SpecialistPool]):
            The SpecialistPools matching the request's filter.
        next_page_token (str):
            The standard List next-page token.
    """

    @property
    def raw_page(self):
        # Pagination helper consumed by the generated pager classes.
        return self

    specialist_pools = proto.RepeatedField(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool)
    next_page_token = proto.Field(proto.STRING, number=2)


class DeleteSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].

    Attributes:
        name (str):
            Required. The resource name of the SpecialistPool to
            delete. Format:
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
        force (bool):
            If true, any specialist managers in this SpecialistPool are
            also deleted. Otherwise the request only succeeds when the
            SpecialistPool has no specialist managers.
    """

    name = proto.Field(proto.STRING, number=1)
    force = proto.Field(proto.BOOL, number=2)


class UpdateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
            Required. The SpecialistPool which replaces the resource on
            the server.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The update mask applied to the resource.
    """

    specialist_pool = proto.Field(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool)
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask)


class UpdateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation metadata for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (str):
            Output only. The SpecialistPool the specialists are being
            added to. Format:
            ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
            The operation generic information.
    """

    specialist_pool = proto.Field(proto.STRING, number=1)
    generic_metadata = proto.Field(
        proto.MESSAGE, number=2, message=operation.GenericOperationMetadata)


__all__ = tuple(sorted(__protobuf__.manifest))
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Study', + 'Trial', + 'StudySpec', + 'Measurement', + }, +) + + +class Study(proto.Message): + r"""A message representing a Study. + + Attributes: + name (str): + Output only. The name of a study. The study's globally + unique identifier. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + display_name (str): + Required. Describes the Study, default value + is empty string. + study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): + Required. Configuration of the Study. + state (google.cloud.aiplatform_v1beta1.types.Study.State): + Output only. The detailed state of a Study. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time at which the study was + created. + inactive_reason (str): + Output only. A human readable reason why the + Study is inactive. This should be empty if a + study is ACTIVE or COMPLETED. 
+ """ + class State(proto.Enum): + r"""Describes the Study state.""" + STATE_UNSPECIFIED = 0 + ACTIVE = 1 + INACTIVE = 2 + COMPLETED = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=3, + message='StudySpec', + ) + state = proto.Field( + proto.ENUM, + number=4, + enum=State, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + inactive_reason = proto.Field( + proto.STRING, + number=6, + ) + + +class Trial(proto.Message): + r"""A message representing a Trial. A Trial contains a unique set + of Parameters that has been or will be evaluated, along with the + objective metrics got by running the Trial. + + Attributes: + name (str): + Output only. Resource name of the Trial + assigned by the service. + id (str): + Output only. The identifier of the Trial + assigned by the service. + state (google.cloud.aiplatform_v1beta1.types.Trial.State): + Output only. The detailed state of the Trial. + parameters (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.Parameter]): + Output only. The parameters of the Trial. + final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): + Output only. The final measurement containing + the objective value. + measurements (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement]): + Output only. A list of measurements that are strictly + lexicographically ordered by their induced tuples (steps, + elapsed_duration). These are used for early stopping + computations. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial's status changed to + ``SUCCEEDED`` or ``INFEASIBLE``. + client_id (str): + Output only. The identifier of the client that originally + requested this Trial. 
Each client is identified by a unique + client_id. When a client asks for a suggestion, Vertex AI + Vizier will assign it a Trial. The client should evaluate + the Trial, complete it, and report back to Vertex AI Vizier. + If suggestion is asked again by same client_id before the + Trial is completed, the same Trial will be returned. + Multiple clients with different client_ids can ask for + suggestions simultaneously, each of them will get their own + Trial. + infeasible_reason (str): + Output only. A human readable string describing why the + Trial is infeasible. This is set only if Trial state is + ``INFEASIBLE``. + custom_job (str): + Output only. The CustomJob name linked to the + Trial. It's set for a HyperparameterTuningJob's + Trial. + web_access_uris (Mapping[str, str]): + Output only. URIs for accessing `interactive + shells `__ + (one URI for each training node). Only available if this + trial is part of a + [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob] + and the job's + [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] + field is ``true``. + + The keys are names of each node used for the trial; for + example, ``workerpool0-0`` for the primary node, + ``workerpool1-0`` for the first node in the second worker + pool, and ``workerpool1-1`` for the second node in the + second worker pool. + + The values are the URIs for each node's interactive shell. + """ + class State(proto.Enum): + r"""Describes a Trial state.""" + STATE_UNSPECIFIED = 0 + REQUESTED = 1 + ACTIVE = 2 + STOPPING = 3 + SUCCEEDED = 4 + INFEASIBLE = 5 + + class Parameter(proto.Message): + r"""A message representing a parameter to be tuned. + + Attributes: + parameter_id (str): + Output only. The ID of the parameter. The parameter should + be defined in [StudySpec's + Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters]. + value (google.protobuf.struct_pb2.Value): + Output only. 
The value of the parameter. ``number_value`` + will be set if a parameter defined in StudySpec is in type + 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be + set if a parameter defined in StudySpec is in type + 'CATEGORICAL'. + """ + + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + id = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Parameter, + ) + final_measurement = proto.Field( + proto.MESSAGE, + number=5, + message='Measurement', + ) + measurements = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='Measurement', + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + client_id = proto.Field( + proto.STRING, + number=9, + ) + infeasible_reason = proto.Field( + proto.STRING, + number=10, + ) + custom_job = proto.Field( + proto.STRING, + number=11, + ) + web_access_uris = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + + +class StudySpec(proto.Message): + r"""Represents specification of a Study. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + decay_curve_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.DecayCurveAutomatedStoppingSpec): + The automated early stopping spec using decay + curve rule. + + This field is a member of `oneof`_ ``automated_stopping_spec``. 
+ median_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.MedianAutomatedStoppingSpec): + The automated early stopping spec using + median rule. + + This field is a member of `oneof`_ ``automated_stopping_spec``. + convex_stop_config (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexStopConfig): + Deprecated. + The automated early stopping using convex + stopping rule. + + This field is a member of `oneof`_ ``automated_stopping_spec``. + convex_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexAutomatedStoppingSpec): + The automated early stopping spec using + convex stopping rule. + + This field is a member of `oneof`_ ``automated_stopping_spec``. + metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]): + Required. Metric specs for the Study. + parameters (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec]): + Required. The set of parameters to tune. + algorithm (google.cloud.aiplatform_v1beta1.types.StudySpec.Algorithm): + The search algorithm specified for the Study. + observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise): + The observation noise level of the study. + Currently only supported by the Vertex AI Vizier + service. Not supported by HyperparamterTuningJob + or TrainingPipeline. + measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType): + Describe which measurement selection type + will be used + """ + class Algorithm(proto.Enum): + r"""The available search algorithms for the Study.""" + ALGORITHM_UNSPECIFIED = 0 + GRID_SEARCH = 2 + RANDOM_SEARCH = 3 + + class ObservationNoise(proto.Enum): + r"""Describes the noise level of the repeated observations. + "Noisy" means that the repeated observations with the same Trial + parameters may lead to different metric evaluations. 
+ """ + OBSERVATION_NOISE_UNSPECIFIED = 0 + LOW = 1 + HIGH = 2 + + class MeasurementSelectionType(proto.Enum): + r"""This indicates which measurement to use if/when the service + automatically selects the final measurement from previously reported + intermediate measurements. Choose this based on two considerations: + A) Do you expect your measurements to monotonically improve? If so, + choose LAST_MEASUREMENT. On the other hand, if you're in a situation + where your system can "over-train" and you expect the performance to + get better for a while but then start declining, choose + BEST_MEASUREMENT. B) Are your measurements significantly noisy + and/or irreproducible? If so, BEST_MEASUREMENT will tend to be + over-optimistic, and it may be better to choose LAST_MEASUREMENT. If + both or neither of (A) and (B) apply, it doesn't matter which + selection type is chosen. + """ + MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0 + LAST_MEASUREMENT = 1 + BEST_MEASUREMENT = 2 + + class MetricSpec(proto.Message): + r"""Represents a metric to optimize. + + Attributes: + metric_id (str): + Required. The ID of the metric. Must not + contain whitespaces and must be unique amongst + all MetricSpecs. + goal (google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec.GoalType): + Required. The optimization goal of the + metric. + """ + class GoalType(proto.Enum): + r"""The available types of optimization goals.""" + GOAL_TYPE_UNSPECIFIED = 0 + MAXIMIZE = 1 + MINIMIZE = 2 + + metric_id = proto.Field( + proto.STRING, + number=1, + ) + goal = proto.Field( + proto.ENUM, + number=2, + enum='StudySpec.MetricSpec.GoalType', + ) + + class ParameterSpec(proto.Message): + r"""Represents a single parameter to optimize. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + double_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DoubleValueSpec): + The value spec for a 'DOUBLE' parameter. + + This field is a member of `oneof`_ ``parameter_value_spec``. + integer_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.IntegerValueSpec): + The value spec for an 'INTEGER' parameter. + + This field is a member of `oneof`_ ``parameter_value_spec``. + categorical_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.CategoricalValueSpec): + The value spec for a 'CATEGORICAL' parameter. + + This field is a member of `oneof`_ ``parameter_value_spec``. + discrete_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DiscreteValueSpec): + The value spec for a 'DISCRETE' parameter. + + This field is a member of `oneof`_ ``parameter_value_spec``. + parameter_id (str): + Required. The ID of the parameter. Must not + contain whitespaces and must be unique amongst + all ParameterSpecs. + scale_type (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ScaleType): + How the parameter should be scaled. Leave unset for + ``CATEGORICAL`` parameters. + conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): + A conditional parameter node is active if the parameter's + value matches the conditional node's parent_value_condition. + + If two items in conditional_parameter_specs have the same + name, they must have disjoint parent_value_condition. + """ + class ScaleType(proto.Enum): + r"""The type of scaling that should be applied to this parameter.""" + SCALE_TYPE_UNSPECIFIED = 0 + UNIT_LINEAR_SCALE = 1 + UNIT_LOG_SCALE = 2 + UNIT_REVERSE_LOG_SCALE = 3 + + class DoubleValueSpec(proto.Message): + r"""Value specification for a parameter in ``DOUBLE`` type. 
+ + Attributes: + min_value (float): + Required. Inclusive minimum value of the + parameter. + max_value (float): + Required. Inclusive maximum value of the + parameter. + default_value (float): + A default value for a ``DOUBLE`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. + + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. + + This field is a member of `oneof`_ ``_default_value``. + """ + + min_value = proto.Field( + proto.DOUBLE, + number=1, + ) + max_value = proto.Field( + proto.DOUBLE, + number=2, + ) + default_value = proto.Field( + proto.DOUBLE, + number=4, + optional=True, + ) + + class IntegerValueSpec(proto.Message): + r"""Value specification for a parameter in ``INTEGER`` type. + + Attributes: + min_value (int): + Required. Inclusive minimum value of the + parameter. + max_value (int): + Required. Inclusive maximum value of the + parameter. + default_value (int): + A default value for an ``INTEGER`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. + + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. + + This field is a member of `oneof`_ ``_default_value``. + """ + + min_value = proto.Field( + proto.INT64, + number=1, + ) + max_value = proto.Field( + proto.INT64, + number=2, + ) + default_value = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + + class CategoricalValueSpec(proto.Message): + r"""Value specification for a parameter in ``CATEGORICAL`` type. + + Attributes: + values (Sequence[str]): + Required. The list of possible categories. + default_value (str): + A default value for a ``CATEGORICAL`` parameter that is + assumed to be a relatively good starting point. Unset value + signals that there is no offered starting point. 
+ + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + + This field is a member of `oneof`_ ``_default_value``. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + default_value = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + + class DiscreteValueSpec(proto.Message): + r"""Value specification for a parameter in ``DISCRETE`` type. + + Attributes: + values (Sequence[float]): + Required. A list of possible values. + The list should be in increasing order and at + least 1e-10 apart. For instance, this parameter + might have possible settings of 1.5, 2.5, and + 4.0. This list should not contain more than + 1,000 values. + default_value (float): + A default value for a ``DISCRETE`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. It automatically + rounds to the nearest feasible discrete point. + + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + + This field is a member of `oneof`_ ``_default_value``. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + default_value = proto.Field( + proto.DOUBLE, + number=3, + optional=True, + ) + + class ConditionalParameterSpec(proto.Message): + r"""Represents a parameter spec with condition from its parent + parameter. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + parent_discrete_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): + The spec for matching values from a parent parameter of + ``DISCRETE`` type. 
+ + This field is a member of `oneof`_ ``parent_value_condition``. + parent_int_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): + The spec for matching values from a parent parameter of + ``INTEGER`` type. + + This field is a member of `oneof`_ ``parent_value_condition``. + parent_categorical_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): + The spec for matching values from a parent parameter of + ``CATEGORICAL`` type. + + This field is a member of `oneof`_ ``parent_value_condition``. + parameter_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec): + Required. The spec for a conditional + parameter. + """ + + class DiscreteValueCondition(proto.Message): + r"""Represents the spec to match discrete values from parent + parameter. + + Attributes: + values (Sequence[float]): + Required. Matches values of the parent parameter of + 'DISCRETE' type. All values must exist in + ``discrete_value_spec`` of parent parameter. + + The Epsilon of the value matching is 1e-10. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + class IntValueCondition(proto.Message): + r"""Represents the spec to match integer values from parent + parameter. + + Attributes: + values (Sequence[int]): + Required. Matches values of the parent parameter of + 'INTEGER' type. All values must lie in + ``integer_value_spec`` of parent parameter. + """ + + values = proto.RepeatedField( + proto.INT64, + number=1, + ) + + class CategoricalValueCondition(proto.Message): + r"""Represents the spec to match categorical values from parent + parameter. + + Attributes: + values (Sequence[str]): + Required. Matches values of the parent parameter of + 'CATEGORICAL' type. All values must exist in + ``categorical_value_spec`` of parent parameter. 
+ """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + parent_discrete_values = proto.Field( + proto.MESSAGE, + number=2, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', + ) + parent_int_values = proto.Field( + proto.MESSAGE, + number=3, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', + ) + parent_categorical_values = proto.Field( + proto.MESSAGE, + number=4, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', + ) + parameter_spec = proto.Field( + proto.MESSAGE, + number=1, + message='StudySpec.ParameterSpec', + ) + + double_value_spec = proto.Field( + proto.MESSAGE, + number=2, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DoubleValueSpec', + ) + integer_value_spec = proto.Field( + proto.MESSAGE, + number=3, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.IntegerValueSpec', + ) + categorical_value_spec = proto.Field( + proto.MESSAGE, + number=4, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.CategoricalValueSpec', + ) + discrete_value_spec = proto.Field( + proto.MESSAGE, + number=5, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DiscreteValueSpec', + ) + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + scale_type = proto.Field( + proto.ENUM, + number=6, + enum='StudySpec.ParameterSpec.ScaleType', + ) + conditional_parameter_specs = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='StudySpec.ParameterSpec.ConditionalParameterSpec', + ) + + class DecayCurveAutomatedStoppingSpec(proto.Message): + r"""The decay curve automated stopping rule builds a Gaussian + Process Regressor to predict the final objective value of a + Trial based on the already completed Trials and the intermediate + measurements of the current Trial. 
Early stopping is requested + for the current Trial if there is very low probability to exceed + the optimal value found so far. + + Attributes: + use_elapsed_duration (bool): + True if + [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration] + is used as the x-axis of each Trials Decay Curve. Otherwise, + [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count] + will be used as the x-axis. + """ + + use_elapsed_duration = proto.Field( + proto.BOOL, + number=1, + ) + + class MedianAutomatedStoppingSpec(proto.Message): + r"""The median automated stopping rule stops a pending Trial if the + Trial's best objective_value is strictly below the median + 'performance' of all completed Trials reported up to the Trial's + last measurement. Currently, 'performance' refers to the running + average of the objective values reported by the Trial in each + measurement. + + Attributes: + use_elapsed_duration (bool): + True if median automated stopping rule applies on + [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]. + It means that elapsed_duration field of latest measurement + of current Trial is used to compute median objective value + for each completed Trials. + """ + + use_elapsed_duration = proto.Field( + proto.BOOL, + number=1, + ) + + class ConvexAutomatedStoppingSpec(proto.Message): + r"""Configuration for ConvexAutomatedStoppingSpec. When there are enough + completed trials (configured by min_measurement_count), for pending + trials with enough measurements and steps, the policy first computes + an overestimate of the objective value at max_num_steps according to + the slope of the incomplete objective value curve. No prediction can + be made if the curve is completely flat. 
If the overestimation is + worse than the best objective value of the completed trials, this + pending trial will be early-stopped, but a last measurement will be + added to the pending trial with max_num_steps and predicted + objective value from the autoregression model. + + Attributes: + max_step_count (int): + Steps used in predicting the final objective for early + stopped trials. In general, it's set to be the same as the + defined steps in training / tuning. If not defined, it will + learn it from the completed trials. When use_steps is false, + this field is set to the maximum elapsed seconds. + min_step_count (int): + Minimum number of steps for a trial to complete. Trials + which do not have a measurement with step_count > + min_step_count won't be considered for early stopping. It's + ok to set it to 0, and a trial can be early stopped at any + stage. By default, min_step_count is set to be one-tenth of + the max_step_count. When use_elapsed_duration is true, this + field is set to the minimum elapsed seconds. + min_measurement_count (int): + The minimal number of measurements in a Trial. + Early-stopping checks will not trigger if less than + min_measurement_count+1 completed trials or pending trials + with less than min_measurement_count measurements. If not + defined, the default value is 5. + learning_rate_parameter_name (str): + The hyper-parameter name used in the tuning job that stands + for learning rate. Leave it blank if learning rate is not in + a parameter in tuning. The learning_rate is used to estimate + the objective value of the ongoing trial. + use_elapsed_duration (bool): + This bool determines whether or not the rule is applied + based on elapsed_secs or steps. If + use_elapsed_duration==false, the early stopping decision is + made according to the predicted objective values according + to the target steps. If use_elapsed_duration==true, + elapsed_secs is used instead of steps. 
Also, in this case, + the parameters max_num_steps and min_num_steps are + overloaded to contain max_elapsed_seconds and + min_elapsed_seconds. + """ + + max_step_count = proto.Field( + proto.INT64, + number=1, + ) + min_step_count = proto.Field( + proto.INT64, + number=2, + ) + min_measurement_count = proto.Field( + proto.INT64, + number=3, + ) + learning_rate_parameter_name = proto.Field( + proto.STRING, + number=4, + ) + use_elapsed_duration = proto.Field( + proto.BOOL, + number=5, + ) + + class ConvexStopConfig(proto.Message): + r"""Configuration for ConvexStopPolicy. + + Attributes: + max_num_steps (int): + Steps used in predicting the final objective for early + stopped trials. In general, it's set to be the same as the + defined steps in training / tuning. When use_steps is false, + this field is set to the maximum elapsed seconds. + min_num_steps (int): + Minimum number of steps for a trial to complete. Trials + which do not have a measurement with num_steps > + min_num_steps won't be considered for early stopping. It's + ok to set it to 0, and a trial can be early stopped at any + stage. By default, min_num_steps is set to be one-tenth of + the max_num_steps. When use_steps is false, this field is + set to the minimum elapsed seconds. + autoregressive_order (int): + The number of Trial measurements used in + autoregressive model for value prediction. A + trial won't be considered early stopping if has + fewer measurement points. + learning_rate_parameter_name (str): + The hyper-parameter name used in the tuning job that stands + for learning rate. Leave it blank if learning rate is not in + a parameter in tuning. The learning_rate is used to estimate + the objective value of the ongoing trial. + use_seconds (bool): + This bool determines whether or not the rule is applied + based on elapsed_secs or steps. If use_seconds==false, the + early stopping decision is made according to the predicted + objective values according to the target steps. 
If + use_seconds==true, elapsed_secs is used instead of steps. + Also, in this case, the parameters max_num_steps and + min_num_steps are overloaded to contain max_elapsed_seconds + and min_elapsed_seconds. + """ + + max_num_steps = proto.Field( + proto.INT64, + number=1, + ) + min_num_steps = proto.Field( + proto.INT64, + number=2, + ) + autoregressive_order = proto.Field( + proto.INT64, + number=3, + ) + learning_rate_parameter_name = proto.Field( + proto.STRING, + number=4, + ) + use_seconds = proto.Field( + proto.BOOL, + number=5, + ) + + decay_curve_stopping_spec = proto.Field( + proto.MESSAGE, + number=4, + oneof='automated_stopping_spec', + message=DecayCurveAutomatedStoppingSpec, + ) + median_automated_stopping_spec = proto.Field( + proto.MESSAGE, + number=5, + oneof='automated_stopping_spec', + message=MedianAutomatedStoppingSpec, + ) + convex_stop_config = proto.Field( + proto.MESSAGE, + number=8, + oneof='automated_stopping_spec', + message=ConvexStopConfig, + ) + convex_automated_stopping_spec = proto.Field( + proto.MESSAGE, + number=9, + oneof='automated_stopping_spec', + message=ConvexAutomatedStoppingSpec, + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=MetricSpec, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ParameterSpec, + ) + algorithm = proto.Field( + proto.ENUM, + number=3, + enum=Algorithm, + ) + observation_noise = proto.Field( + proto.ENUM, + number=6, + enum=ObservationNoise, + ) + measurement_selection_type = proto.Field( + proto.ENUM, + number=7, + enum=MeasurementSelectionType, + ) + + +class Measurement(proto.Message): + r"""A message representing a Measurement of a Trial. A + Measurement contains the Metrics got by executing a Trial using + suggested hyperparameter values. + + Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. + step_count (int): + Output only. 
The number of steps the machine + learning model has been trained for. Must be + non-negative. + metrics (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement.Metric]): + Output only. A list of metrics got by + evaluating the objective functions using + suggested Parameter values. + """ + + class Metric(proto.Message): + r"""A message representing a metric in the measurement. + + Attributes: + metric_id (str): + Output only. The ID of the Metric. The Metric should be + defined in [StudySpec's + Metrics][google.cloud.aiplatform.v1beta1.StudySpec.metrics]. + value (float): + Output only. The value for this metric. + """ + + metric_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.DOUBLE, + number=2, + ) + + elapsed_duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + step_count = proto.Field( + proto.INT64, + number=2, + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Metric, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py new file mode 100644 index 0000000000..7f18a75e6a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Tensorboard', + }, +) + + +class Tensorboard(proto.Message): + r"""Tensorboard is a physical database that stores users' + training metrics. A default Tensorboard is provided in each + region of a GCP project. If needed users can also create extra + Tensorboards in their projects. + + Attributes: + name (str): + Output only. Name of the Tensorboard. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + display_name (str): + Required. User provided name of this + Tensorboard. + description (str): + Description of this Tensorboard. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Tensorboard. If set, this Tensorboard and all + sub-resources of this Tensorboard will be + secured by this key. + blob_storage_path_prefix (str): + Output only. Consumer project Cloud Storage + path prefix used to store blob data, which can + either be a bucket or directory. Does not end + with a '/'. + run_count (int): + Output only. The number of Runs stored in + this Tensorboard. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Tensorboard + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Tensorboard + was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize your Tensorboards. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. 
No more than 64 user labels can be + associated with one Tensorboard (System labels + are excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=11, + message=gca_encryption_spec.EncryptionSpec, + ) + blob_storage_path_prefix = proto.Field( + proto.STRING, + number=10, + ) + run_count = proto.Field( + proto.INT32, + number=5, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py new file mode 100644 index 0000000000..8896c1aa08 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'Scalar', + 'TensorboardTensor', + 'TensorboardBlobSequence', + 'TensorboardBlob', + }, +) + + +class TimeSeriesData(proto.Message): + r"""All the data stored in a TensorboardTimeSeries. + + Attributes: + tensorboard_time_series_id (str): + Required. The ID of the + TensorboardTimeSeries, which will become the + final component of the TensorboardTimeSeries' + resource name + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. The value type of this + time series. All the values in this time series + data must match this value type. + values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + Required. Data points in this time series. + """ + + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=1, + ) + value_type = proto.Field( + proto.ENUM, + number=2, + enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, + ) + values = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='TimeSeriesDataPoint', + ) + + +class TimeSeriesDataPoint(proto.Message): + r"""A TensorboardTimeSeries data point. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. 
+ Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + scalar (google.cloud.aiplatform_v1beta1.types.Scalar): + A scalar value. + + This field is a member of `oneof`_ ``value``. + tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor): + A tensor value. + + This field is a member of `oneof`_ ``value``. + blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence): + A blob sequence value. + + This field is a member of `oneof`_ ``value``. + wall_time (google.protobuf.timestamp_pb2.Timestamp): + Wall clock timestamp when this data point is + generated by the end user. + step (int): + Step index of this data point within the run. + """ + + scalar = proto.Field( + proto.MESSAGE, + number=3, + oneof='value', + message='Scalar', + ) + tensor = proto.Field( + proto.MESSAGE, + number=4, + oneof='value', + message='TensorboardTensor', + ) + blobs = proto.Field( + proto.MESSAGE, + number=5, + oneof='value', + message='TensorboardBlobSequence', + ) + wall_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + step = proto.Field( + proto.INT64, + number=2, + ) + + +class Scalar(proto.Message): + r"""One point viewable on a scalar metric plot. + + Attributes: + value (float): + Value of the point at this step / timestamp. + """ + + value = proto.Field( + proto.DOUBLE, + number=1, + ) + + +class TensorboardTensor(proto.Message): + r"""One point viewable on a tensor metric plot. + + Attributes: + value (bytes): + Required. Serialized form of + https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto + version_number (int): + Optional. Version number of TensorProto used to serialize + [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value]. 
+ """ + + value = proto.Field( + proto.BYTES, + number=1, + ) + version_number = proto.Field( + proto.INT32, + number=2, + ) + + +class TensorboardBlobSequence(proto.Message): + r"""One point viewable on a blob metric plot, but mostly just a wrapper + message to work around repeated fields can't be used directly within + ``oneof`` fields. + + Attributes: + values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): + List of blobs contained within the sequence. + """ + + values = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='TensorboardBlob', + ) + + +class TensorboardBlob(proto.Message): + r"""One blob (e.g, image, graph) viewable on a blob metric plot. + + Attributes: + id (str): + Output only. A URI safe key uniquely + identifying a blob. Can be used to locate the + blob stored in the Cloud Storage bucket of the + consumer project. + data (bytes): + Optional. The bytes of the blob is not + present unless it's returned by the + ReadTensorboardBlobData endpoint. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + data = proto.Field( + proto.BYTES, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py new file mode 100644 index 0000000000..b582c506de --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardExperiment', + }, +) + + +class TensorboardExperiment(proto.Message): + r"""A TensorboardExperiment is a group of TensorboardRuns, that + are typically the results of a training job run, in a + Tensorboard. + + Attributes: + name (str): + Output only. Name of the TensorboardExperiment. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + display_name (str): + User provided name of this + TensorboardExperiment. + description (str): + Description of this TensorboardExperiment. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. 
Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + + - output only, its value is the + [metadata_schema's][metadata_schema_uri] title. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + source (str): + Immutable. Source of the + TensorboardExperiment. Example: a custom + training job. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + source = proto.Field( + proto.STRING, + number=8, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py new file mode 100644 index 0000000000..18db31e37f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardRun', + }, +) + + +class TensorboardRun(proto.Message): + r"""TensorboardRun maps to a specific execution of a training job + with a given set of hyperparameter values, model definition, + dataset, etc + + Attributes: + name (str): + Output only. Name of the TensorboardRun. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + display_name (str): + Required. User provided name of this + TensorboardRun. This value must be unique among + all TensorboardRuns belonging to the same parent + TensorboardExperiment. + description (str): + Description of this TensorboardRun. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was last updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to organize your + TensorboardRuns. + + This field will be used to filter and visualize Runs in the + Tensorboard UI. For example, a Vertex AI training job can + set a label aiplatform.googleapis.com/training_job_id=xxxxx + to all the runs created within that job. An end user can set + a label experiment_id=xxxxx for all the runs produced in a + Jupyter notebook. These runs can be grouped by a label value + and visualized together in the Tensorboard UI. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one TensorboardRun (System labels are + excluded). 
+ + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + etag (str): + Used to perform a consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py new file mode 100644 index 0000000000..e62df6b7f0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py @@ -0,0 +1,1224 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateTensorboardRequest', + 'GetTensorboardRequest', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'UpdateTensorboardRequest', + 'DeleteTensorboardRequest', + 'CreateTensorboardExperimentRequest', + 'GetTensorboardExperimentRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'UpdateTensorboardExperimentRequest', + 'DeleteTensorboardExperimentRequest', + 'BatchCreateTensorboardRunsRequest', + 'BatchCreateTensorboardRunsResponse', + 'CreateTensorboardRunRequest', + 'GetTensorboardRunRequest', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'UpdateTensorboardRunRequest', + 'DeleteTensorboardRunRequest', + 'BatchCreateTensorboardTimeSeriesRequest', + 'BatchCreateTensorboardTimeSeriesResponse', + 'CreateTensorboardTimeSeriesRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'UpdateTensorboardTimeSeriesRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'BatchReadTensorboardTimeSeriesDataRequest', + 'BatchReadTensorboardTimeSeriesDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 
'WriteTensorboardExperimentDataRequest', + 'WriteTensorboardExperimentDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'CreateTensorboardOperationMetadata', + 'UpdateTensorboardOperationMetadata', + }, +) + + +class CreateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard.Tensorboard, + ) + + +class GetTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Attributes: + parent (str): + Required. The resource name of the Location to list + Tensorboards. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the Tensorboards that match the filter + expression. + page_size (int): + The maximum number of Tensorboards to return. + The service may return fewer than this value. If + unspecified, at most 100 Tensorboards will be + returned. 
The maximum value is 100; values above + 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Attributes: + tensorboards (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard]): + The Tensorboards matching the request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + tensorboards = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard.Tensorboard, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard.Tensorboard, + ) + + +class DeleteTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard to be deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. 
+ + Attributes: + parent (str): + Required. The resource name of the Tensorboard to create the + TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + tensorboard_experiment_id (str): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + tensorboard_experiment_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardExperimentsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardExperiments. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + filter (str): + Lists the TensorboardExperiments that match + the filter expression. + page_size (int): + The maximum number of TensorboardExperiments + to return. The service may return fewer than + this value. 
If unspecified, at most 50 + TensorboardExperiments will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardExperimentsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + tensorboard_experiments (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment]): + The TensorboardExperiments matching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages.
+ """ + + @property + def raw_page(self): + return self + + tensorboard_experiments = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is used + to identify the TensorboardExperiment to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + +class DeleteTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchCreateTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardRuns in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The parent field in the CreateTensorboardRunRequest messages + must match this field. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): + Required. The request message specifying the + TensorboardRuns to create. A maximum of 1000 + TensorboardRuns can be created in a batch. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateTensorboardRunRequest', + ) + + +class BatchCreateTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): + The created TensorboardRuns. + """ + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_run.TensorboardRun, + ) + + +class CreateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardRun in. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun to create. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which will + become the final component of the Tensorboard run's resource + name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + tensorboard_run_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ReadTensorboardBlobDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + time_series (str): + Required. The resource name of the TensorboardTimeSeries to + list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + blob_ids (Sequence[str]): + IDs of the blobs to read. 
+ """ + + time_series = proto.Field( + proto.STRING, + number=1, + ) + blob_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class ReadTensorboardBlobDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + blobs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): + Blob messages containing blob bytes. + """ + + blobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TensorboardBlob, + ) + + +class ListTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the + TensorboardExperiment to list TensorboardRuns. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + filter (str): + Lists the TensorboardRuns that match the + filter expression. + page_size (int): + The maximum number of TensorboardRuns to + return. The service may return fewer than this + value. If unspecified, at most 50 + TensorboardRuns will be returned. The maximum + value is 1000; values above 1000 will be coerced + to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): + The TensorboardRuns matching the request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_run.TensorboardRun, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the update. + The fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required.
The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + + +class DeleteTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchCreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardExperiment to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + The TensorboardRuns referenced by the parent fields in the + CreateTensorboardTimeSeriesRequest messages must be sub + resources of this TensorboardExperiment. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]): + Required. The request message specifying the + TensorboardTimeSeries to create. A maximum of + 1000 TensorboardTimeSeries can be created in a + batch. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='CreateTensorboardTimeSeriesRequest', + ) + + +class BatchCreateTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): + The created TensorboardTimeSeries. + """ + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class CreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardRun to create + the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + tensorboard_time_series_id (str): + Optional. The user specified unique ID to use for the + TensorboardTimeSeries, which will become the final component + of the TensorboardTimeSeries's resource name. This value + should match "[a-z0-9][a-z0-9-]{0, 127}". + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries to + create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=3, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class GetTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the + TensorboardRun to list TensorboardTimeSeries. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + filter (str): + Lists the TensorboardTimeSeries that match + the filter expression. + page_size (int): + The maximum number of TensorboardTimeSeries + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardTimeSeries will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + call. Provide this to retrieve the subsequent page. 
+ + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): + The TensorboardTimeSeries matching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required.
Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is used + to identify the TensorboardTimeSeries to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class DeleteTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class BatchReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard (str): + Required. The resource name of the Tensorboard containing + TensorboardTimeSeries to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. 
+ The TensorboardTimeSeries referenced by + [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] + must be sub resources of this Tensorboard. + time_series (Sequence[str]): + Required. The resource names of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + tensorboard = proto.Field( + proto.STRING, + number=1, + ) + time_series = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class BatchReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + The returned time series data. + """ + + time_series_data = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesData, + ) + + +class ReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + max_data_points (int): + The maximum number of TensorboardTimeSeries' + data to return. + This value should be a positive integer. + This value can be set to -1 to return all data. + filter (str): + Reads the TensorboardTimeSeries' data that + match the filter expression. 
+ """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + max_data_points = proto.Field( + proto.INT32, + number=2, + ) + filter = proto.Field( + proto.STRING, + number=3, + ) + + +class ReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (google.cloud.aiplatform_v1beta1.types.TimeSeriesData): + The returned time series data. + """ + + time_series_data = proto.Field( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardExperimentDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + Attributes: + tensorboard_experiment (str): + Required. The resource name of the TensorboardExperiment to + write data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): + Required. Requests containing per-run + TensorboardTimeSeries data to write. + """ + + tensorboard_experiment = proto.Field( + proto.STRING, + number=1, + ) + write_run_data_requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='WriteTensorboardRunDataRequest', + ) + + +class WriteTensorboardExperimentDataResponse(proto.Message): + r"""Response message for + [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. + + """ + + +class WriteTensorboardRunDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. 
+ + Attributes: + tensorboard_run (str): + Required. The resource name of the TensorboardRun to write + data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + Required. The TensorboardTimeSeries data to + write. Values within a time series are indexed + by their step value. Repeated writes to the same + step will overwrite the existing value for that + step. + The upper limit of data points per write request + is 5000. + """ + + tensorboard_run = proto.Field( + proto.STRING, + number=1, + ) + time_series_data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardRunDataResponse(proto.Message): + r"""Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + """ + + +class ExportTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + filter (str): + Exports the TensorboardTimeSeries' data that + match the filter expression. + page_size (int): + The maximum number of data points to return per page. The + default page_size will be 1000. Values must be between 1 and + 10000. Values above 10000 will be coerced to 10000. + page_token (str): + A page token, received from a previous + [TensorboardService.ExportTensorboardTimeSeries][] call. + Provide this to retrieve the subsequent page.
+ + When paginating, all other parameters provided to + [TensorboardService.ExportTensorboardTimeSeries][] must + match the call that provided the page token. + order_by (str): + Field to use to sort the + TensorboardTimeSeries' data. By default, + TensorboardTimeSeries' data will be returned in + a pseudo random order. + """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + + +class ExportTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + time_series_data_points (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + The returned time series data points. + next_page_token (str): + A token, which can be sent as + [ExportTensorboardTimeSeriesRequest.page_token][] to + retrieve the next page. If this field is omitted, there are + no subsequent pages. + """ + + @property + def raw_page(self): + return self + + time_series_data_points = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesDataPoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform create Tensorboard. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform update Tensorboard. 
+ + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py new file mode 100644 index 0000000000..3d13493958 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardTimeSeries', + }, +) + + +class TensorboardTimeSeries(proto.Message): + r"""TensorboardTimeSeries maps to time series produced in + training runs + + Attributes: + name (str): + Output only. Name of the + TensorboardTimeSeries. + display_name (str): + Required. User provided name of this + TensorboardTimeSeries. This value should be + unique among all TensorboardTimeSeries resources + belonging to the same TensorboardRun resource + (parent resource).
+ description (str): + Description of this TensorboardTimeSeries. + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. Type of + TensorboardTimeSeries value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was last updated. + etag (str): + Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + plugin_name (str): + Immutable. Name of the plugin this time + series pertains to. Such as Scalar, Tensor, Blob + plugin_data (bytes): + Data of the current plugin, with the size + limited to 65KB. + metadata (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.Metadata): + Output only. Scalar, Tensor, or Blob metadata + for this TensorboardTimeSeries. + """ + class ValueType(proto.Enum): + r"""An enum representing the value type of a + TensorboardTimeSeries. + """ + VALUE_TYPE_UNSPECIFIED = 0 + SCALAR = 1 + TENSOR = 2 + BLOB_SEQUENCE = 3 + + class Metadata(proto.Message): + r"""Describes metadata for a TensorboardTimeSeries. + + Attributes: + max_step (int): + Output only. Max step index of all data + points within a TensorboardTimeSeries. + max_wall_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Max wall clock timestamp of all + data points within a TensorboardTimeSeries. + max_blob_sequence_length (int): + Output only. The largest blob sequence length (number of + blobs) of all data points in this time series, if its + ValueType is BLOB_SEQUENCE.
+ """ + + max_step = proto.Field( + proto.INT64, + number=1, + ) + max_wall_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + max_blob_sequence_length = proto.Field( + proto.INT64, + number=3, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + value_type = proto.Field( + proto.ENUM, + number=4, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + plugin_name = proto.Field( + proto.STRING, + number=8, + ) + plugin_data = proto.Field( + proto.BYTES, + number=9, + ) + metadata = proto.Field( + proto.MESSAGE, + number=10, + message=Metadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py new file mode 100644 index 0000000000..272c7c5257 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -0,0 +1,652 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TrainingPipeline', + 'InputDataConfig', + 'FractionSplit', + 'FilterSplit', + 'PredefinedSplit', + 'TimestampSplit', + 'StratifiedSplit', + }, +) + + +class TrainingPipeline(proto.Message): + r"""The TrainingPipeline orchestrates tasks associated with training a + Model. It always executes the training task, and optionally may also + export data from Vertex AI's Dataset which becomes the training + input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + Attributes: + name (str): + Output only. Resource name of the + TrainingPipeline. + display_name (str): + Required. The user-defined name of this + TrainingPipeline. + input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig): + Specifies Vertex AI owned input data that may be used for + training the Model. The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + should make clear whether this config is used and if there + are any special requirements on how it should be filled. If + nothing about this config is mentioned in the + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], + then it should be assumed that the TrainingPipeline does not + depend on this configuration. + training_task_definition (str): + Required. 
A Google Cloud Storage path to the + YAML file that defines the training task which + is responsible for producing the model artifact, + and may also include additional auxiliary work. + The definition files that can be used here are + found in + gs://google-cloud-aiplatform/schema/trainingjob/definition/. + Note: The URI given on output will be immutable + and probably different, including the URI + scheme, than the one given on input. The output + URI will point to a location where the user only + has a read access. + training_task_inputs (google.protobuf.struct_pb2.Value): + Required. The training task's parameter(s), as specified in + the + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s + ``inputs``. + training_task_metadata (google.protobuf.struct_pb2.Value): + Output only. The metadata information as specified in the + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s + ``metadata``. This metadata is an auxiliary runtime and + final information about the training task. While the + pipeline is running this information is populated only at a + best effort basis. Only present if the pipeline's + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + contains ``metadata`` object. + model_to_upload (google.cloud.aiplatform_v1beta1.types.Model): + Describes the Model that may be uploaded (via + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]) + by this TrainingPipeline. The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + should make clear whether this Model description should be + populated, and if there are any special requirements + regarding how it should be filled. 
If nothing is mentioned + in the + [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], + then it should be assumed that this field should not be + filled and the training task either uploads the Model + without a need of this information, or that training task + does not support uploading a Model as part of the pipeline. + When the Pipeline's state becomes + ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been + uploaded into Vertex AI, then the model_to_upload's resource + [name][google.cloud.aiplatform.v1beta1.Model.name] is + populated. The Model is always uploaded into the Project and + Location in which this pipeline is. + model_id (str): + Optional. The ID to use for the uploaded Model, which will + become the final component of the model resource name. + + This value may be up to 63 characters, and valid characters + are ``[a-z0-9_-]``. The first character cannot be a number + or hyphen. + parent_model (str): + Optional. When specify this field, the ``model_to_upload`` + will not be uploaded as a new model, instead, it will become + a new version of this ``parent_model``. + state (google.cloud.aiplatform_v1beta1.types.PipelineState): + Output only. The detailed state of the + pipeline. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the pipeline's state is + ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline for the first + time entered the ``PIPELINE_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline entered any of + the following states: ``PIPELINE_STATE_SUCCEEDED``, + ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. 
+ update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline + was most recently updated. + labels (Mapping[str, str]): + The labels with user-defined metadata to + organize TrainingPipelines. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a TrainingPipeline. + If set, this TrainingPipeline will be secured by this key. + + Note: Model trained by this TrainingPipeline is also secured + by this key if + [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec] + is not set separately. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + input_data_config = proto.Field( + proto.MESSAGE, + number=3, + message='InputDataConfig', + ) + training_task_definition = proto.Field( + proto.STRING, + number=4, + ) + training_task_inputs = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + training_task_metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + model_to_upload = proto.Field( + proto.MESSAGE, + number=7, + message=model.Model, + ) + model_id = proto.Field( + proto.STRING, + number=22, + ) + parent_model = proto.Field( + proto.STRING, + number=21, + ) + state = proto.Field( + proto.ENUM, + number=9, + enum=pipeline_state.PipelineState, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = 
proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=18, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class InputDataConfig(proto.Message): + r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + fraction_split (google.cloud.aiplatform_v1beta1.types.FractionSplit): + Split based on fractions defining the size of + each set. + + This field is a member of `oneof`_ ``split``. + filter_split (google.cloud.aiplatform_v1beta1.types.FilterSplit): + Split based on the provided filters for each + set. + + This field is a member of `oneof`_ ``split``. + predefined_split (google.cloud.aiplatform_v1beta1.types.PredefinedSplit): + Supported only for tabular Datasets. + Split based on a predefined key. + + This field is a member of `oneof`_ ``split``. + timestamp_split (google.cloud.aiplatform_v1beta1.types.TimestampSplit): + Supported only for tabular Datasets. + Split based on the timestamp of the input data + pieces. + + This field is a member of `oneof`_ ``split``. + stratified_split (google.cloud.aiplatform_v1beta1.types.StratifiedSplit): + Supported only for tabular Datasets. + Split based on the distribution of the specified + column. + + This field is a member of `oneof`_ ``split``. 
+ gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + The Cloud Storage location where the training data is to be + written to. In the given directory a new directory is + created with name: + ``dataset---`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All training input data is written into that + directory. + + The Vertex AI environment variables representing Cloud + Storage data URIs are represented in the Cloud Storage + wildcard format to support sharded data. e.g.: + "gs://.../training-*.jsonl" + + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data + + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + + - AIP_TEST_DATA_URI = + "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". + + This field is a member of `oneof`_ ``destination``. + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + Only applicable to custom training with tabular Dataset with + BigQuery source. + + The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset___`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data is written into that dataset. In the + dataset three tables are created, ``training``, + ``validation`` and ``test``. + + - AIP_DATA_FORMAT = "bigquery". + + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset\_\ **\ .training" + + - AIP_VALIDATION_DATA_URI = + "bigquery_destination.dataset\_\ **\ .validation" + + - AIP_TEST_DATA_URI = + "bigquery_destination.dataset\_\ **\ .test". + + This field is a member of `oneof`_ ``destination``. + dataset_id (str): + Required. The ID of the Dataset in the same Project and + Location which data will be used to train the Model. 
The + Dataset must use schema compatible with Model being trained, + and what is compatible should be described in the used + TrainingPipeline's [training_task_definition] + [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. + For tabular Datasets, all their data is exported to + training, to pick and choose from. + annotations_filter (str): + Applicable only to Datasets that have DataItems and + Annotations. + + A filter on Annotations of the Dataset. Only Annotations + that both match this filter and belong to DataItems not + ignored by the split method are used in respectively + training, validation or test role, depending on the role of + the DataItem they are on (for the auto-assigned that role is + decided by Vertex AI). A filter with same syntax as the one + used in + [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] + may be used, but note here it filters across all Annotations + of the Dataset, and not just within a single DataItem. + annotation_schema_uri (str): + Applicable only to custom training with Datasets that have + DataItems and Annotations. + + Cloud Storage URI that points to a YAML file describing the + annotation schema. The schema is defined as an OpenAPI 3.0.2 + `Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/ , + note that the chosen schema must be consistent with + [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + of the Dataset specified by + [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]. + + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. 
+ + When used in conjunction with + [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], + the Annotations used for training are filtered by both + [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] + and + [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]. + """ + + fraction_split = proto.Field( + proto.MESSAGE, + number=2, + oneof='split', + message='FractionSplit', + ) + filter_split = proto.Field( + proto.MESSAGE, + number=3, + oneof='split', + message='FilterSplit', + ) + predefined_split = proto.Field( + proto.MESSAGE, + number=4, + oneof='split', + message='PredefinedSplit', + ) + timestamp_split = proto.Field( + proto.MESSAGE, + number=5, + oneof='split', + message='TimestampSplit', + ) + stratified_split = proto.Field( + proto.MESSAGE, + number=12, + oneof='split', + message='StratifiedSplit', + ) + gcs_destination = proto.Field( + proto.MESSAGE, + number=8, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=10, + oneof='destination', + message=io.BigQueryDestination, + ) + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + annotations_filter = proto.Field( + proto.STRING, + number=6, + ) + annotation_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + + +class FractionSplit(proto.Message): + r"""Assigns the input data to training, validation, and test sets as per + the given fractions. Any of ``training_fraction``, + ``validation_fraction`` and ``test_fraction`` may optionally be + provided, they must sum to up to 1. If the provided ones sum to less + than 1, the remainder is assigned to sets as decided by Vertex AI. + If none of the fractions are set, by default roughly 80% of data is + used for training, 10% for validation, and 10% for test. 
+ + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + + +class FilterSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + + Supported only for unstructured Datasets. + + Attributes: + training_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to train the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + validation_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to validate the Model. A + filter with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + test_filter (str): + Required. A filter on DataItems of the Dataset. 
DataItems + that match this filter are used to test the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + """ + + training_filter = proto.Field( + proto.STRING, + number=1, + ) + validation_filter = proto.Field( + proto.STRING, + number=2, + ) + test_filter = proto.Field( + proto.STRING, + number=3, + ) + + +class PredefinedSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the value of a provided key. + + Supported only for tabular Datasets. + + Attributes: + key (str): + Required. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + """ + + key = proto.Field( + proto.STRING, + number=1, + ) + + +class TimestampSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the Dataset's data + columns. 
The values of the key (the values in the column) + must be in RFC 3339 ``date-time`` format, where + ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If + for a piece of data the key is not present or has an invalid + value, that piece is ignored by the pipeline. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +class StratifiedSplit(proto.Message): + r"""Assigns input data to the training, validation, and test sets so + that the distribution of values found in the categorical column (as + specified by the ``key`` field) is mirrored within each split. The + fraction values determine the relative sizes of the splits. + + For example, if the specified column has three values, with 50% of + the rows having value "A", 25% value "B", and 25% value "C", and the + split fractions are specified as 80/10/10, then the training set + will constitute 80% of the training data, with about 50% of the + training set rows having the value "A" for the specified column, + about 25% having the value "B", and about 25% having the value "C". + + Only the top 500 occurring values are used; any values not in the + top 500 values are randomly assigned to a split. If less than three + rows contain a specific value, those rows are randomly assigned. + + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the + Dataset's data columns. The key provided must be + for a categorical column. 
+ """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py new file mode 100644 index 0000000000..b55ba3ab02 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + }, +) + + +class BoolArray(proto.Message): + r"""A list of boolean values. + + Attributes: + values (Sequence[bool]): + A list of bool values. + """ + + values = proto.RepeatedField( + proto.BOOL, + number=1, + ) + + +class DoubleArray(proto.Message): + r"""A list of double values. + + Attributes: + values (Sequence[float]): + A list of bool values. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + +class Int64Array(proto.Message): + r"""A list of int64 values. + + Attributes: + values (Sequence[int]): + A list of int64 values. 
+ """ + + values = proto.RepeatedField( + proto.INT64, + number=1, + ) + + +class StringArray(proto.Message): + r"""A list of string values. + + Attributes: + values (Sequence[str]): + A list of string values. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py new file mode 100644 index 0000000000..8e303537a4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'UnmanagedContainerModel', + }, +) + + +class UnmanagedContainerModel(proto.Message): + r"""Contains model information necessary to perform batch + prediction without requiring a full model import. + + Attributes: + artifact_uri (str): + The path to the directory containing the + Model artifact and any of its supporting files. 
+ predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): + Contains the schemata used in Model's + predictions and explanations + container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec): + Input only. The specification of the + container that is to be used when deploying this + Model. + """ + + artifact_uri = proto.Field( + proto.STRING, + number=1, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=2, + message=model.PredictSchemata, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=3, + message=model.ModelContainerSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py new file mode 100644 index 0000000000..a3a7516b1d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'UserActionReference', + }, +) + + +class UserActionReference(proto.Message): + r"""References an API call. It contains more information about + long running operation and Jobs that are triggered by the API + call. 
+ + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + operation (str): + For API calls that return a long running + operation. Resource name of the long running + operation. Format: + 'projects/{project}/locations/{location}/operations/{operation}' + + This field is a member of `oneof`_ ``reference``. + data_labeling_job (str): + For API calls that start a LabelingJob. Resource name of the + LabelingJob. Format: + 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' + + This field is a member of `oneof`_ ``reference``. + method (str): + The method name of the API RPC call. For + example, + "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". + """ + + operation = proto.Field( + proto.STRING, + number=1, + oneof='reference', + ) + data_labeling_job = proto.Field( + proto.STRING, + number=2, + oneof='reference', + ) + method = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py new file mode 100644 index 0000000000..80f280989f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Value', + }, +) + + +class Value(proto.Message): + r"""Value is the value of the field. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + int_value (int): + An integer value. + + This field is a member of `oneof`_ ``value``. + double_value (float): + A double value. + + This field is a member of `oneof`_ ``value``. + string_value (str): + A string value. + + This field is a member of `oneof`_ ``value``. 
+ """ + + int_value = proto.Field( + proto.INT64, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=3, + oneof='value', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py new file mode 100644 index 0000000000..8f1b5f1551 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py @@ -0,0 +1,589 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'GetStudyRequest', + 'CreateStudyRequest', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'DeleteStudyRequest', + 'LookupStudyRequest', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', + 'SuggestTrialsMetadata', + 'CreateTrialRequest', + 'GetTrialRequest', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'AddTrialMeasurementRequest', + 'CompleteTrialRequest', + 'DeleteTrialRequest', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CheckTrialEarlyStoppingStateMetatdata', + 'StopTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + }, +) + + +class GetStudyRequest(proto.Message): + r"""Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. + + Attributes: + name (str): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateStudyRequest(proto.Message): + r"""Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + CustomJob in. Format: + ``projects/{project}/locations/{location}`` + study (google.cloud.aiplatform_v1beta1.types.Study): + Required. The Study configuration used to + create the Study. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + study = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Study, + ) + + +class ListStudiesRequest(proto.Message): + r"""Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + page_token (str): + Optional. A page token to request the next + page of results. If unspecified, there are no + subsequent pages. + page_size (int): + Optional. The maximum number of studies to + return per "page" of results. If unspecified, + service will pick an appropriate default. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + + +class ListStudiesResponse(proto.Message): + r"""Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + + Attributes: + studies (Sequence[google.cloud.aiplatform_v1beta1.types.Study]): + The studies associated with the project. + next_page_token (str): + Passes this token as the ``page_token`` field of the request + for a subsequent call. If this field is omitted, there are + no subsequent pages. + """ + + @property + def raw_page(self): + return self + + studies = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Study, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteStudyRequest(proto.Message): + r"""Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. + + Attributes: + name (str): + Required. The name of the Study resource to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/studies/{study}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class LookupStudyRequest(proto.Message): + r"""Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + + Attributes: + parent (str): + Required. The resource name of the Location to get the Study + from. Format: ``projects/{project}/locations/{location}`` + display_name (str): + Required. The user-defined display name of + the Study + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + + +class SuggestTrialsRequest(proto.Message): + r"""Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + Attributes: + parent (str): + Required. The project and location that the Study belongs + to. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + suggestion_count (int): + Required. The number of suggestions + requested. + client_id (str): + Required. The identifier of the client that is requesting + the suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + suggestion_count = proto.Field( + proto.INT32, + number=2, + ) + client_id = proto.Field( + proto.STRING, + number=3, + ) + + +class SuggestTrialsResponse(proto.Message): + r"""Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + Attributes: + trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): + A list of Trials. + study_state (google.cloud.aiplatform_v1beta1.types.Study.State): + The state of the Study. 
+ start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which operation processing + completed. + """ + + trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + study_state = proto.Field( + proto.ENUM, + number=2, + enum=gca_study.Study.State, + ) + start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class SuggestTrialsMetadata(proto.Message): + r"""Details of operations that perform Trials suggestion. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for suggesting Trials. + client_id (str): + The identifier of the client that is requesting the + suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + client_id = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTrialRequest(proto.Message): + r"""Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. + + Attributes: + parent (str): + Required. The resource name of the Study to create the Trial + in. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + trial (google.cloud.aiplatform_v1beta1.types.Trial): + Required. The Trial to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + trial = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Trial, + ) + + +class GetTrialRequest(proto.Message): + r"""Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. + + Attributes: + name (str): + Required. The name of the Trial resource. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrialsRequest(proto.Message): + r"""Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + + Attributes: + parent (str): + Required. The resource name of the Study to list the Trial + from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + page_token (str): + Optional. A page token to request the next + page of results. If unspecified, there are no + subsequent pages. + page_size (int): + Optional. The number of Trials to retrieve + per "page" of results. If unspecified, the + service will pick an appropriate default. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + + +class ListTrialsResponse(proto.Message): + r"""Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + + Attributes: + trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): + The Trials associated with the Study. + next_page_token (str): + Pass this token as the ``page_token`` field of the request + for a subsequent call. If this field is omitted, there are + no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class AddTrialMeasurementRequest(proto.Message): + r"""Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. + + Attributes: + trial_name (str): + Required. The name of the trial to add measurement. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + measurement (google.cloud.aiplatform_v1beta1.types.Measurement): + Required. The measurement to be added to a + Trial. + """ + + trial_name = proto.Field( + proto.STRING, + number=1, + ) + measurement = proto.Field( + proto.MESSAGE, + number=3, + message=gca_study.Measurement, + ) + + +class CompleteTrialRequest(proto.Message): + r"""Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): + Optional. If provided, it will be used as the completed + Trial's final_measurement; Otherwise, the service will + auto-select a previously reported measurement as the + final-measurement + trial_infeasible (bool): + Optional. True if the Trial cannot be run with the given + Parameter, and final_measurement will be ignored. + infeasible_reason (str): + Optional. A human readable reason why the trial was + infeasible. This should only be provided if + ``trial_infeasible`` is true. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + final_measurement = proto.Field( + proto.MESSAGE, + number=2, + message=gca_study.Measurement, + ) + trial_infeasible = proto.Field( + proto.BOOL, + number=3, + ) + infeasible_reason = proto.Field( + proto.STRING, + number=4, + ) + + +class DeleteTrialRequest(proto.Message): + r"""Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CheckTrialEarlyStoppingStateRequest(proto.Message): + r"""Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + Attributes: + trial_name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + trial_name = proto.Field( + proto.STRING, + number=1, + ) + + +class CheckTrialEarlyStoppingStateResponse(proto.Message): + r"""Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + Attributes: + should_stop (bool): + True if the Trial should stop. + """ + + should_stop = proto.Field( + proto.BOOL, + number=1, + ) + + +class CheckTrialEarlyStoppingStateMetatdata(proto.Message): + r"""This message will be placed in the metadata field of a + google.longrunning.Operation associated with a + CheckTrialEarlyStoppingState request. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for suggesting Trials. + study (str): + The name of the Study that the Trial belongs + to. + trial (str): + The Trial name. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + study = proto.Field( + proto.STRING, + number=2, + ) + trial = proto.Field( + proto.STRING, + number=3, + ) + + +class StopTrialRequest(proto.Message): + r"""Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. + + Attributes: + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListOptimalTrialsRequest(proto.Message): + r"""Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + Attributes: + parent (str): + Required. The name of the Study that the + optimal Trial belongs to. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + + +class ListOptimalTrialsResponse(proto.Message): + r"""Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + Attributes: + optimal_trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): + The pareto-optimal Trials for multiple objective Study or + the optimal trial for single objective Study. The definition + of pareto-optimal can be checked in wiki page. 
+ https://en.wikipedia.org/wiki/Pareto_efficiency + """ + + optimal_trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1beta1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py new file mode 100644 index 0000000000..9e500c530c --- /dev/null +++ b/owl-bot-staging/v1beta1/noxfile.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +ALL_PYTHON = [ + "3.6", + "3.7", + "3.8", + "3.9", + "3.10", +] + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + +BLACK_VERSION = "black==19.10b0" +BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] +DEFAULT_PYTHON_VERSION = "3.9" + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds", + # exclude update_lower_bounds from default + "docs", + "blacken", + "lint", + "lint_setup_py", +] + +@nox.session(python=ALL_PYTHON) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/', + '--cov=tests/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==4.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. 
+ """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py new file mode 100644 index 0000000000..be32ee2193 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_CreateDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_CreateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py new file mode 100644 index 0000000000..5fd6715c4b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_CreateDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDatasetRequest( + parent="parent_value", + dataset=dataset, + ) + + # Make the request + operation = client.create_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_CreateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py new file mode 100644 index 0000000000..630f191611 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_DeleteDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_DeleteDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py new file mode 100644 index 0000000000..38cca9813c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_DeleteDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDatasetRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_DeleteDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_async.py new file mode 100644 index 0000000000..4079305540 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ExportData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ExportData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_sync.py new file mode 100644 index 0000000000..18edfe461e --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_export_data_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ExportData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + export_config = aiplatform_v1beta1.ExportDataConfig() + export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + + request = aiplatform_v1beta1.ExportDataRequest( + name="name_value", + export_config=export_config, + ) + + # Make the request + operation = client.export_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ExportData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py new file mode 100644 index 0000000000..91d937dc52 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = await client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py new file mode 100644 index 0000000000..747b3920fa --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAnnotationSpec +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_annotation_spec(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetAnnotationSpecRequest( + name="name_value", + ) + + # Make the request + response = client.get_annotation_spec(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py new file mode 100644 index 0000000000..bfbe3efe3a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_GetDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = await client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_GetDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py new file mode 100644 index 0000000000..dfce9fe3ff --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_GetDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDatasetRequest( + name="name_value", + ) + + # Make the request + response = client.get_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_GetDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_async.py new file mode 100644 index 0000000000..4bf1ee252e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ImportData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ImportData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_sync.py new file mode 100644 index 0000000000..f639bf1027 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_import_data_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ImportData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_data(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + import_configs = aiplatform_v1beta1.ImportDataConfig() + import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + import_configs.import_schema_uri = "import_schema_uri_value" + + request = aiplatform_v1beta1.ImportDataRequest( + name="name_value", + import_configs=import_configs, + ) + + # Make the request + operation = client.import_data(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ImportData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py new file mode 100644 index 0000000000..efd32c9b48 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListAnnotations_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_annotations(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListAnnotations_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py new file mode 100644 index 0000000000..a81eb58758 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAnnotations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListAnnotations_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_annotations(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListAnnotationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_annotations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListAnnotations_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py new file mode 100644 index 0000000000..69d9fb12d3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListDataItems_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_data_items(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListDataItems_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py new file mode 100644 index 0000000000..7a67c0207c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataItems +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListDataItems_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_data_items(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataItemsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_items(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListDataItems_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py new file mode 100644 index 0000000000..8848e920d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListDatasets_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_datasets(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListDatasets_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py new file mode 100644 index 0000000000..7dce202e99 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDatasets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_ListDatasets_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_datasets(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDatasetsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_datasets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_ListDatasets_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py new file mode 100644 index 0000000000..eafbcd3e12 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_UpdateDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceAsyncClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = await client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_UpdateDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py new file mode 100644 index 0000000000..03b78f94e8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 
-*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_DatasetService_UpdateDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_dataset(): + # Create a client + client = aiplatform_v1beta1.DatasetServiceClient() + + # Initialize request argument(s) + dataset = aiplatform_v1beta1.Dataset() + dataset.display_name = "display_name_value" + dataset.metadata_schema_uri = "metadata_schema_uri_value" + dataset.metadata.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.UpdateDatasetRequest( + dataset=dataset, + ) + + # Make the request + response = client.update_dataset(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_DatasetService_UpdateDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py new file mode 100644 index 0000000000..e39f00f51b --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py new file mode 100644 index 0000000000..4f8a59f4cd --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateEndpointRequest( + parent="parent_value", + endpoint=endpoint, + ) + + # Make the request + operation = client.create_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py new file mode 100644 index 0000000000..29f9baf561 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py new file mode 100644 index 0000000000..56d5e4eaa5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py new file mode 100644 index 0000000000..93cad9575d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_DeployModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_deploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_DeployModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py new file mode 100644 index 0000000000..9dba9f27f3 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_DeployModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_deploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + deployed_model = aiplatform_v1beta1.DeployedModel() + deployed_model.dedicated_resources.min_replica_count = 1803 + deployed_model.model = "model_value" + + request = aiplatform_v1beta1.DeployModelRequest( + endpoint="endpoint_value", + deployed_model=deployed_model, + ) + + # Make the request + operation = client.deploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_DeployModel_sync] diff --git 
a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py new file mode 100644 index 0000000000..602de3b4bb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_GetEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_GetEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py new file mode 100644 index 0000000000..79df67a22e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_GetEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_GetEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py new file mode 100644 index 0000000000..9bacfc4230 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_ListEndpoints_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_endpoints(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_ListEndpoints_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py new file mode 100644 index 0000000000..1a39d2b7cb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_endpoints(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py new file mode 100644 index 0000000000..0838ddbc7c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_UndeployModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_undeploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_UndeployModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py new file mode 100644 index 0000000000..03443b2c08 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UndeployModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_UndeployModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_undeploy_model(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployModelRequest( + endpoint="endpoint_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + operation = client.undeploy_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_UndeployModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py new file mode 100644 index 0000000000..42e01ce142 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceAsyncClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = await client.update_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py new file mode 100644 index 0000000000..c456bc03f4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_endpoint(): + # Create a client + client = aiplatform_v1beta1.EndpointServiceClient() + + # Initialize request argument(s) + endpoint = aiplatform_v1beta1.Endpoint() + endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateEndpointRequest( + endpoint=endpoint, + ) + + # Make the request + response = client.update_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py new file mode 100644 index 0000000000..f7c6963218 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = await client.read_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py new file mode 100644 index 0000000000..1098ded9e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_id="entity_id_value", + feature_selector=feature_selector, + ) + + # Make the request + response = client.read_feature_values(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py new file mode 100644 index 0000000000..b85bacf44b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = await client.streaming_read_feature_values(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py new file mode 100644 index 0000000000..70e59a5409 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamingReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_streaming_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient() + + # Initialize request argument(s) + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest( + entity_type="entity_type_value", + entity_ids=['entity_ids_value_1', 'entity_ids_value_2'], + feature_selector=feature_selector, + ) + + # Make the request + stream = client.streaming_read_feature_values(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py new file mode 100644 index 0000000000..03c577cde7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_create_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py new file mode 100644 index 0000000000..1ed56a5a37 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_create_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + requests = aiplatform_v1beta1.CreateFeatureRequest() + requests.parent = "parent_value" + requests.feature.value_type = "BYTES" + requests.feature_id = "feature_id_value" + + request = aiplatform_v1beta1.BatchCreateFeaturesRequest( + parent="parent_value", + requests=requests, + ) + + # Make the request + operation = client.batch_create_features(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync] diff --git 
a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py new file mode 100644 index 0000000000..be6bb266ef --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py new file mode 100644 index 0000000000..fee4dfdfc1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_read_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + csv_read_instances = aiplatform_v1beta1.CsvSource() + csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + entity_type_specs = aiplatform_v1beta1.EntityTypeSpec() + entity_type_specs.entity_type_id = "entity_type_id_value" + entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.BatchReadFeatureValuesRequest( + csv_read_instances=csv_read_instances, + featurestore="featurestore_value", + destination=destination, + entity_type_specs=entity_type_specs, + ) + + # Make the request + operation = client.batch_read_feature_values(request=request) + + print("Waiting for operation to complete...") 
+ + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py new file mode 100644 index 0000000000..bd07902f04 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py new file mode 100644 index 0000000000..0dc8e423b1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateEntityTypeRequest( + parent="parent_value", + entity_type_id="entity_type_id_value", + ) + + # Make the request + operation = client.create_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py new file mode 100644 index 0000000000..9e6f792ee9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py new file mode 100644 index 0000000000..3c8a040e19 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.CreateFeatureRequest( + parent="parent_value", + feature=feature, + feature_id="feature_id_value", + ) + + # Make the request + operation = client.create_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py new file mode 100644 index 0000000000..55c0265e60 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py new file mode 100644 index 0000000000..bae54da7e0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateFeaturestoreRequest( + parent="parent_value", + featurestore_id="featurestore_id_value", + ) + + # Make the request + operation = client.create_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py new file mode 100644 index 0000000000..54f4a4f228 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py new file mode 100644 index 0000000000..45e8bed43e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteEntityTypeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_entity_type(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py new file mode 100644 index 0000000000..162d4f4953 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py new file mode 100644 index 0000000000..a52680308d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeatureRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_feature(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py new file mode 100644 index 0000000000..f6b00a321c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- 
+# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py new file mode 100644 index 0000000000..4ef5be9942 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteFeaturestoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py new file mode 100644 index 0000000000..08d208189a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py new file mode 100644 index 0000000000..11ae90f23e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + destination = aiplatform_v1beta1.FeatureValueDestination() + destination.bigquery_destination.output_uri = "output_uri_value" + + feature_selector = aiplatform_v1beta1.FeatureSelector() + feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2'] + + request = aiplatform_v1beta1.ExportFeatureValuesRequest( + entity_type="entity_type_value", + destination=destination, + feature_selector=feature_selector, + ) + + # Make the request + operation = client.export_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py new file mode 100644 index 0000000000..55abe4ca8c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py new file mode 100644 index 0000000000..7b6337b8c7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetEntityTypeRequest( + name="name_value", + ) + + # Make the request + response = client.get_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py new file mode 100644 index 0000000000..51e1f2893a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = await client.get_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py new file mode 100644 index 0000000000..34066681d2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeatureRequest( + name="name_value", + ) + + # Make the request + response = client.get_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py new file mode 100644 index 0000000000..e6b01ff9df --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_featurestore(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py new file mode 100644 index 0000000000..a370c27282 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetFeaturestoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_featurestore(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py new file mode 100644 index 0000000000..fbb5550b9e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py new file mode 100644 index 0000000000..b368090c97 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportFeatureValues +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_feature_values(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + avro_source = aiplatform_v1beta1.AvroSource() + avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + + feature_specs = aiplatform_v1beta1.FeatureSpec() + feature_specs.id = "id_value" + + request = aiplatform_v1beta1.ImportFeatureValuesRequest( + avro_source=avro_source, + feature_time_field="feature_time_field_value", + entity_type="entity_type_value", + feature_specs=feature_specs, + ) + + # Make the request + operation = client.import_feature_values(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py new file mode 100644 index 0000000000..b67b8554d6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_entity_types(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py new file mode 100644 index 0000000000..101da137a4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListEntityTypes +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_entity_types(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListEntityTypesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_entity_types(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_async.py new file mode 100644 index 0000000000..87bae8d617 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py new file mode 100644 index 0000000000..97f95073aa --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_features(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py new file mode 100644 index 0000000000..fab4bc0252 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_featurestores(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py new file mode 100644 index 0000000000..bae9106bd1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListFeaturestores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_featurestores(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListFeaturestoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_featurestores(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_async.py new file mode 100644 index 0000000000..b5b05cd290 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py new file mode 100644 index 0000000000..422eb6df31 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchFeatures +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_features(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchFeaturesRequest( + location="location_value", + ) + + # Make the request + page_result = client.search_features(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py new file mode 100644 index 0000000000..7edcae5f25 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = await client.update_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py new file mode 100644 index 0000000000..cead93e903 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateEntityType +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_entity_type(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateEntityTypeRequest( + ) + + # Make the request + response = client.update_entity_type(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py new file mode 100644 index 0000000000..403b48dff6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = await client.update_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py new file mode 100644 index 0000000000..4e0513690d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py @@ -0,0 
+1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeature +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_feature(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + feature = aiplatform_v1beta1.Feature() + feature.value_type = "BYTES" + + request = aiplatform_v1beta1.UpdateFeatureRequest( + feature=feature, + ) + + # Make the request + response = client.update_feature(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py new file mode 100644 index 0000000000..6137c45be4 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py new file mode 100644 index 0000000000..66fc77cda6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateFeaturestore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_featurestore(): + # Create a client + client = aiplatform_v1beta1.FeaturestoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateFeaturestoreRequest( + ) + + # Make the request + operation = client.update_featurestore(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py new file mode 100644 index 0000000000..3c6fff682e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py new file mode 100644 index 0000000000..bac6e73ba6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexEndpointRequest( + parent="parent_value", + index_endpoint=index_endpoint, + ) + + # Make the request + operation = client.create_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py new file mode 100644 index 0000000000..df6eae815e --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py new file mode 100644 index 0000000000..0f0eb7e4cd --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexEndpointRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index_endpoint(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py new file mode 100644 index 0000000000..578032ecab --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_deploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py new file mode 100644 index 0000000000..c3100783d3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_deploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.DeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.deploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py new file mode 100644 index 0000000000..1083e72b06 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py new file mode 100644 index 
0000000000..bce70eed7a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexEndpointRequest( + name="name_value", + ) + + # Make the request + response = client.get_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py new file mode 100644 index 0000000000..dd0b43a9fb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py new file mode 100644 index 0000000000..dc50814ddb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListIndexEndpoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_index_endpoints(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexEndpointsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_index_endpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py new file mode 100644 index 0000000000..fa6f730deb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MutateDeployedIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py new file mode 100644 index 0000000000..af37ccb450 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MutateDeployedIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_mutate_deployed_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + deployed_index = aiplatform_v1beta1.DeployedIndex() + deployed_index.id = "id_value" + deployed_index.index = "index_value" + + request = aiplatform_v1beta1.MutateDeployedIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index=deployed_index, + ) + + # Make the request + operation = client.mutate_deployed_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py new file mode 100644 index 0000000000..c62dd2408b --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_undeploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py new file mode 100644 index 0000000000..8605878dbe --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeployIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_undeploy_index(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UndeployIndexRequest( + index_endpoint="index_endpoint_value", + deployed_index_id="deployed_index_id_value", + ) + + # Make the request + operation = client.undeploy_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py new file mode 100644 index 0000000000..9c29e14327 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = await client.update_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py new file mode 100644 index 0000000000..c3c213c461 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndexEndpoint +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_index_endpoint(): + # Create a client + client = aiplatform_v1beta1.IndexEndpointServiceClient() + + # Initialize request argument(s) + index_endpoint = aiplatform_v1beta1.IndexEndpoint() + index_endpoint.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexEndpointRequest( + index_endpoint=index_endpoint, + ) + + # Make the request + response = client.update_index_endpoint(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_async.py new file mode 100644 index 0000000000..4424f64bc7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_CreateIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_CreateIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_sync.py new file mode 100644 index 0000000000..b741299dab --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_create_index_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_CreateIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateIndexRequest( + parent="parent_value", + index=index, + ) + + # Make the request + operation = client.create_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_CreateIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_async.py new file mode 100644 index 0000000000..02c604c849 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_DeleteIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_DeleteIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_sync.py new file mode 100644 index 0000000000..b55e86a2d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_delete_index_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteIndexRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_async.py new file mode 100644 index 0000000000..5be45b1802 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_GetIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = await client.get_index(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_GetIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_sync.py new file mode 100644 index 0000000000..5827bff3e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_get_index_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_GetIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetIndexRequest( + name="name_value", + ) + + # Make the request + response = client.get_index(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_GetIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_async.py new file mode 100644 index 0000000000..011938c402 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_ListIndexes_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_indexes(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_ListIndexes_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_sync.py new file mode 100644 index 0000000000..8d64d779c9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_list_indexes_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListIndexes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_ListIndexes_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_indexes(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListIndexesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_indexes(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_ListIndexes_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_async.py new file mode 100644 index 0000000000..be4999a8c7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_UpdateIndex_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceAsyncClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_UpdateIndex_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_sync.py new file mode 100644 index 0000000000..dcc2cb5263 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_index_service_update_index_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for UpdateIndex +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_index(): + # Create a client + client = aiplatform_v1beta1.IndexServiceClient() + + # Initialize request argument(s) + index = aiplatform_v1beta1.Index() + index.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateIndexRequest( + index=index, + ) + + # Make the request + operation = client.update_index(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py new file mode 100644 index 0000000000..ba03ab89e8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py new file mode 100644 index 0000000000..3121919254 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_batch_prediction_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py new file mode 100644 index 0000000000..88ad6ef326 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_custom_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelCustomJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py new file mode 100644 index 0000000000..512422f1ae --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelCustomJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_custom_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py new file mode 100644 index 0000000000..c9a1df76d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py new file mode 100644 index 0000000000..a6e9a38057 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_data_labeling_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..3b8639f0ea --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..12208f4859 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_hyperparameter_tuning_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py new file mode 100644 index 0000000000..a9fc8f3759 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = await client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async] diff --git 
a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py new file mode 100644 index 0000000000..722d9299ef --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob() + batch_prediction_job.display_name = "display_name_value" + batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2'] + batch_prediction_job.input_config.instances_format = "instances_format_value" + batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value" + batch_prediction_job.output_config.predictions_format = "predictions_format_value" + + request = aiplatform_v1beta1.CreateBatchPredictionJobRequest( + parent="parent_value", + batch_prediction_job=batch_prediction_job, + ) + + # Make the request + response = client.create_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_async.py new file mode 100644 index 0000000000..6351151c93 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = await client.create_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateCustomJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py new file mode 100644 index 0000000000..8b33f1ba39 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py @@ -0,0 +1,50 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + custom_job = aiplatform_v1beta1.CustomJob() + custom_job.display_name = "display_name_value" + custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateCustomJobRequest( + parent="parent_value", + custom_job=custom_job, + ) + + # Make the request + response = client.create_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py new file mode 100644 index 
0000000000..562c7544d7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = await client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py new file mode 100644 index 0000000000..5b2d75e1fb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + data_labeling_job = aiplatform_v1beta1.DataLabelingJob() + data_labeling_job.display_name = "display_name_value" + data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2'] + data_labeling_job.labeler_count = 1375 + data_labeling_job.instruction_uri = "instruction_uri_value" + data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value" + data_labeling_job.inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateDataLabelingJobRequest( + parent="parent_value", + data_labeling_job=data_labeling_job, + ) + + # Make the request + response = client.create_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..a9efcb8325 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = await client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..897a527b33 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob() + hyperparameter_tuning_job.display_name = "display_name_value" + hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value" + hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE" + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96 + hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962 + hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value" + hyperparameter_tuning_job.max_trial_count = 
1609 + hyperparameter_tuning_job.parallel_trial_count = 2128 + hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value" + + request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest( + parent="parent_value", + hyperparameter_tuning_job=hyperparameter_tuning_job, + ) + + # Make the request + response = client.create_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..b3254041ce --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = await client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..dcfbd6d5f6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest( + parent="parent_value", + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + response = client.create_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py new file mode 
100644 index 0000000000..5da9337e99 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py new file mode 100644 index 0000000000..d629433bff --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_batch_prediction_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py new file mode 100644 index 0000000000..24f6d4cfd1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py new file mode 100644 index 0000000000..6e0f0bd7b4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteCustomJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_custom_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py new file mode 100644 index 0000000000..ed1b757258 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py new file mode 100644 index 0000000000..2eab38d6b9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- 
+# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_data_labeling_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..988f314639 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..fa37df05fa --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_hyperparameter_tuning_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..964cd314f6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..cf618dd645 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py new file mode 100644 index 0000000000..0dd2177c0c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py new file mode 100644 index 0000000000..9b239136d3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py @@ -0,0 
+1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBatchPredictionJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_batch_prediction_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetBatchPredictionJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_batch_prediction_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_async.py new file mode 100644 index 0000000000..50e3451cdd --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_async.py @@ -0,0 +1,45 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetCustomJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetCustomJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py new file mode 100644 index 0000000000..24b599eb63 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCustomJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetCustomJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_custom_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetCustomJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_custom_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetCustomJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py new file mode 100644 index 0000000000..4b99f7417b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py new file mode 100644 index 0000000000..b50e4560cf --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDataLabelingJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_data_labeling_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetDataLabelingJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_data_labeling_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py new file mode 100644 index 0000000000..d9442b24e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py new file mode 100644 index 0000000000..81e81ed50e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetHyperparameterTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_hyperparameter_tuning_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_hyperparameter_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..04c4807aed --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..98c2a22367 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_deployment_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py new file mode 100644 index 0000000000..4823327421 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py new file mode 100644 index 0000000000..2ec0ea59b2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBatchPredictionJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_batch_prediction_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListBatchPredictionJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_batch_prediction_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py new file mode 100644 index 0000000000..6a337f9030 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListCustomJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListCustomJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py new file mode 100644 index 0000000000..43b3ea4593 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListCustomJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_custom_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListCustomJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_custom_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py new file mode 100644 index 0000000000..65870edc86 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py new file mode 100644 index 0000000000..757ba1fa2d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDataLabelingJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_data_labeling_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListDataLabelingJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_data_labeling_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py new file mode 100644 index 0000000000..ba8f21ecfe --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py new file mode 100644 index 0000000000..7d884a8d62 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHyperparameterTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_hyperparameter_tuning_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hyperparameter_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py new file mode 100644 index 0000000000..878effea77 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py new file mode 100644 index 0000000000..6a502bca9a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModelDeploymentMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_deployment_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_deployment_monitoring_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..27721a6378 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..2053f9e73c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PauseModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_pause_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.pause_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..7980c3cd12 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + await client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..d79d66feda --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResumeModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_resume_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest( + name="name_value", + ) + + # Make the request + client.resume_model_deployment_monitoring_job(request=request) + + +# [END aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py new file mode 100644 index 0000000000..156bd967ba --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py new file mode 100644 index 0000000000..b7cf0a6b90 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelDeploymentMonitoringStatsAnomalies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_model_deployment_monitoring_stats_anomalies(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest( + model_deployment_monitoring_job="model_deployment_monitoring_job_value", + deployed_model_id="deployed_model_id_value", + ) + + # Make the request + page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py new file mode 100644 index 0000000000..6b0a493fa3 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceAsyncClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py new file mode 100644 index 0000000000..5d989ac975 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelDeploymentMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_model_deployment_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.JobServiceClient() + + # Initialize request argument(s) + model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob() + model_deployment_monitoring_job.display_name = "display_name_value" + model_deployment_monitoring_job.endpoint = "endpoint_value" + + request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest( + model_deployment_monitoring_job=model_deployment_monitoring_job, + ) + + # Make the request + operation = client.update_model_deployment_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py new file mode 100644 index 0000000000..3d9f6ea005 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py new file mode 100644 index 0000000000..562437ffc4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for AddContextArtifactsAndExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_context_artifacts_and_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_artifacts_and_executions(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py new file mode 100644 index 0000000000..a143de0168 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_context_children(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = await client.add_context_children(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py new file mode 100644 index 0000000000..c2edee20bf --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddContextChildren +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_context_children(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddContextChildrenRequest( + context="context_value", + ) + + # Make the request + response = client.add_context_children(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py new file mode 100644 index 0000000000..f248dc6369 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_add_execution_events(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.add_execution_events(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py new file mode 100644 index 0000000000..51bbbb020f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddExecutionEvents +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_execution_events(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddExecutionEventsRequest( + execution="execution_value", + ) + + # Make the request + response = client.add_execution_events(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py new file mode 100644 index 0000000000..64cbb8a8cd --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py new file mode 100644 index 0000000000..f7c6009dd4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateArtifactRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_async.py new file mode 100644 index 0000000000..b7ce1f9feb --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateContext_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_sync.py new file mode 100644 index 0000000000..7aa6f4207c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_context_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateContextRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateContext_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_async.py new file mode 100644 index 0000000000..bed6fd6988 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateExecution_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py new file mode 100644 index 0000000000..c44dd94057 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateExecutionRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py new file mode 100644 index 0000000000..a6ceab6351 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1beta1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1beta1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = await client.create_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py new file mode 100644 index 0000000000..582ffa9513 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py @@ -0,0 +1,49 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + metadata_schema = aiplatform_v1beta1.MetadataSchema() + metadata_schema.schema = "schema_value" + + request = aiplatform_v1beta1.CreateMetadataSchemaRequest( + parent="parent_value", + metadata_schema=metadata_schema, + ) + + # Make the request + response = client.create_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py new file mode 100644 index 0000000000..5aa7ec4ee4 --- 
/dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py new file mode 100644 index 0000000000..9469189ba3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateMetadataStoreRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py new file mode 100644 index 0000000000..127f903d9e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py new file mode 100644 index 0000000000..5e6ef6a279 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteArtifactRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_artifact(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_async.py new file mode 100644 index 0000000000..c05d6fe148 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteContext_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py new file mode 100644 index 0000000000..8d68e0e310 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteContextRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_context(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py new file mode 100644 index 0000000000..468ec45eed --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py new file mode 100644 index 0000000000..d0d0a201da --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py @@ -0,0 +1,49 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExecutionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_execution(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py new file mode 100644 index 0000000000..08414e9312 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py new file mode 100644 index 0000000000..3aa38e9029 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteMetadataStoreRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_metadata_store(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py new file mode 100644 index 0000000000..4923dd6f0d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = await client.get_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetArtifact_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py new file mode 100644 index 0000000000..c021c7ef84 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetArtifactRequest( + name="name_value", + ) + + # Make the request + response = client.get_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_async.py new file mode 100644 index 0000000000..8e09fd86b8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = await client.get_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetContext_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_sync.py new file mode 100644 index 0000000000..59eee30ad5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_context_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetContextRequest( + name="name_value", + ) + + # Make the request + response = client.get_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetContext_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_async.py new file mode 100644 index 0000000000..faf0607d94 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetExecution_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py new file mode 100644 index 0000000000..3b235ebbf4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExecutionRequest( + name="name_value", + ) + + # Make the request + response = client.get_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetExecution_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py new file mode 100644 index 0000000000..1f51b8c23c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py new file mode 100644 index 0000000000..4a9996172b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for GetMetadataSchema +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_metadata_schema(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataSchemaRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_schema(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py new file mode 100644 index 0000000000..df7987842a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = await client.get_metadata_store(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py new file mode 100644 index 0000000000..9e3cbd2df1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMetadataStore +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_metadata_store(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetMetadataStoreRequest( + name="name_value", + ) + + # Make the request + response = client.get_metadata_store(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py new file mode 100644 index 0000000000..3f4791abb6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py new file mode 100644 index 0000000000..b2f11fe505 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for ListArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListArtifactsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_artifacts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py new file mode 100644 index 0000000000..62fa6a7e9c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListContexts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListContexts_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py new file mode 100644 index 0000000000..0169b67705 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListContexts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListContextsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_contexts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListContexts_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_async.py new file mode 100644 index 0000000000..10595404a0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListExecutions_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py new file mode 100644 index 0000000000..fc756c40cd --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExecutionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_executions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py new file mode 100644 index 0000000000..564ba0aae0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py new file mode 100644 index 0000000000..d3f897bb7c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataSchemas +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_metadata_schemas(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataSchemasRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_schemas(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py new file mode 100644 index 0000000000..4e39c073f0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# 
Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py new file mode 100644 index 0000000000..a9f88c782c --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMetadataStores +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_metadata_stores(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListMetadataStoresRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_metadata_stores(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py new file mode 100644 index 
0000000000..ec465a555b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py new file mode 100644 index 0000000000..ff0974d79f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for PurgeArtifacts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_artifacts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeArtifactsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_artifacts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py new file mode 100644 index 0000000000..74444d67b0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py new file mode 100644 index 0000000000..dcd54aaae6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_contexts(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeContextsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_contexts(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py new file mode 100644 index 0000000000..8b92f6cf34 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_purge_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py new file mode 100644 index 0000000000..3f02f62c57 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py @@ -0,0 +1,50 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PurgeExecutions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_purge_executions(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.PurgeExecutionsRequest( + parent="parent_value", + filter="filter_value", + ) + + # Make the request + operation = client.purge_executions(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py new file mode 100644 index 0000000000..7cd3db523a --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = await client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py new file mode 100644 index 0000000000..28ba2c5ce4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryArtifactLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_artifact_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest( + artifact="artifact_value", + ) + + # Make the request + response = client.query_artifact_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py new file mode 100644 index 0000000000..d48f893219 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = await client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py new file mode 100644 index 0000000000..9d766eacd4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryContextLineageSubgraph +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_context_lineage_subgraph(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest( + context="context_value", + ) + + # Make the request + response = client.query_context_lineage_subgraph(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py new file mode 100644 index 0000000000..f3d8fdeb60 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = await client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py new file mode 100644 index 0000000000..370bfd96c1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 
2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExecutionInputsAndOutputs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_query_execution_inputs_and_outputs(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest( + execution="execution_value", + ) + + # Make the request + response = client.query_execution_inputs_and_outputs(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py new file mode 100644 index 0000000000..74658ab8a7 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = await client.update_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py new file mode 100644 index 0000000000..bdfac1385c --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateArtifact +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_artifact(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateArtifactRequest( + ) + + # Make the request + response = client.update_artifact(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_async.py new file mode 100644 index 0000000000..f75a50c148 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateContext_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = await client.update_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateContext_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_sync.py new file mode 100644 index 0000000000..c4c9c04772 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_context_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateContext +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_context(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateContextRequest( + ) + + # Make the request + response = client.update_context(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_async.py new file mode 100644 index 0000000000..2b3ae95e3a --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_async.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = await client.update_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py new file mode 100644 index 0000000000..16d9a5edfa --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExecution +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_execution(): + # Create a client + client = aiplatform_v1beta1.MetadataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExecutionRequest( + ) + + # Make the request + response = client.update_execution(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py new file mode 100644 index 0000000000..f6cad564df --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = 
client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py new file mode 100644 index 0000000000..bc1e2fc412 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchMigrateResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_batch_migrate_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest() + migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value" + migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value" + + request = aiplatform_v1beta1.BatchMigrateResourcesRequest( + parent="parent_value", + migrate_resource_requests=migrate_resource_requests, + ) + + # Make the request + operation = client.batch_migrate_resources(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py new file mode 100644 index 0000000000..d3ac5618e0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py new file mode 100644 index 0000000000..af50eb80b4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchMigratableResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_search_migratable_resources(): + # Create a client + client = aiplatform_v1beta1.MigrationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchMigratableResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.search_migratable_resources(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_async.py new file mode 100644 index 0000000000..0ad8dd565f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_async.py @@ -0,0 +1,49 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_sync.py new file mode 100644 index 0000000000..657d1a6414 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py new file mode 100644 index 
0000000000..540cfe1fb1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py new file mode 100644 index 0000000000..d8ee80ef9d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteModelVersion +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_version(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelVersionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_version(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_async.py new file mode 100644 index 0000000000..f3b7465316 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ExportModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ExportModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_sync.py new file mode 100644 index 0000000000..f6dc889c0c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_export_model_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ExportModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportModelRequest( + name="name_value", + ) + + # Make the request + operation = client.export_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ExportModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_async.py new file mode 100644 index 0000000000..f1b8db001b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py new file mode 100644 index 0000000000..0bdef4d038 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py new file mode 100644 index 0000000000..007c432ac0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py new file mode 100644 index 0000000000..5f0f61ad82 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluationSlice +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_evaluation_slice(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationSliceRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation_slice(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py new file mode 100644 index 0000000000..9accf1c4c0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelEvaluationRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_sync.py new file mode 100644 index 0000000000..fe080717ba --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_get_model_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_GetModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelRequest( + name="name_value", + ) + + # Make the request + response = client.get_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_GetModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py new file mode 100644 index 0000000000..67a59cf5e5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py new file mode 100644 index 0000000000..bb92dfdaea --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py new file mode 100644 index 0000000000..b0f086c918 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py new file mode 100644 index 0000000000..3d858362a7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py @@ -0,0 +1,46 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluationSlices +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_evaluation_slices(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluation_slices(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py new file mode 100644 index 0000000000..4c2d6ce419 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py new 
file mode 100644 index 0000000000..d5b3ac952c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelEvaluations +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_evaluations(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelEvaluationsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_evaluations(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py new file mode 100644 index 0000000000..a5b5a951a3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelVersions_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelVersions_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py new file mode 100644 index 0000000000..d717dd002d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModelVersions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_versions(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelVersionsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_versions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_async.py new file mode 100644 index 0000000000..aa7d4f8ba2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModels_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_models(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModels_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_sync.py new file mode 100644 index 0000000000..c5e2be1d4f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_models_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ListModels_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_models(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ListModels_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py new file mode 100644 index 0000000000..517425df71 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = await client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py new file mode 100644 index 0000000000..fe8cb60202 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MergeVersionAliases +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_merge_version_aliases(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.MergeVersionAliasesRequest( + name="name_value", + version_aliases=['version_aliases_value_1', 'version_aliases_value_2'], + ) + + # Make the request + response = client.merge_version_aliases(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py new file mode 100644 index 0000000000..cc070a4f30 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExplanationDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_explanation_dataset(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExplanationDatasetRequest( + model="model_value", + ) + + # Make the request + operation = client.update_explanation_dataset(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py new file mode 100644 index 0000000000..3f17d7ca8d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExplanationDataset +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_explanation_dataset(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateExplanationDatasetRequest( + model="model_value", + ) + + # Make the request + operation = client.update_explanation_dataset(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_async.py new file mode 100644 index 0000000000..9a7bf7df62 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_async.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UpdateModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = await client.update_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UpdateModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_sync.py new file mode 100644 index 0000000000..e0ed3b7a65 --- 
/dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_update_model_sync.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UpdateModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_update_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UpdateModelRequest( + model=model, + ) + + # Make the request + response = client.update_model(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UpdateModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_async.py new file mode 100644 index 0000000000..b05244b467 --- 
/dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UploadModel_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_upload_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UploadModel_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_sync.py new file mode 100644 index 0000000000..df7f1b66da --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_model_service_upload_model_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadModel +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_UploadModel_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_upload_model(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + model = aiplatform_v1beta1.Model() + model.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadModelRequest( + parent="parent_value", + model=model, + ) + + # Make the request + operation = client.upload_model(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_UploadModel_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py new file mode 100644 index 0000000000..1ae9453393 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_pipeline_job(request=request) + + +# [END aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py new file mode 100644 index 0000000000..a48304dd4b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CancelPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelPipelineJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_pipeline_job(request=request) + + +# [END aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py new file mode 100644 index 0000000000..818bbf5fbf --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + await client.cancel_training_pipeline(request=request) + + +# [END aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py new file mode 100644 index 0000000000..24a6fc3e58 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for CancelTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_cancel_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CancelTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + client.cancel_training_pipeline(request=request) + + +# [END aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py new file mode 100644 index 0000000000..c6eb72f73a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py new file mode 100644 index 0000000000..ee76472f93 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreatePipelineJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py new file mode 100644 index 0000000000..d7ce61f6ea --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = await client.create_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py new file mode 100644 index 0000000000..fecf6cf314 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + training_pipeline = aiplatform_v1beta1.TrainingPipeline() + training_pipeline.display_name = "display_name_value" + training_pipeline.training_task_definition = "training_task_definition_value" + training_pipeline.training_task_inputs.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.CreateTrainingPipelineRequest( + parent="parent_value", + training_pipeline=training_pipeline, + ) + + # Make the request + response = client.create_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py new file 
mode 100644 index 0000000000..64aafe1369 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py new file mode 100644 index 0000000000..fc8b5ea2fa --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeletePipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeletePipelineJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_pipeline_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py new file mode 100644 index 0000000000..097638e8e9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py new file mode 100644 index 0000000000..1a80bf60c1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_training_pipeline(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py new file mode 100644 index 0000000000..6eba96b5d1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py new file mode 100644 index 0000000000..37984b5bf8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPipelineJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_pipeline_job(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetPipelineJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_pipeline_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py new file mode 100644 index 0000000000..5e6de6843e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = await client.get_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py new file mode 100644 index 0000000000..ba5edd03ad --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under 
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrainingPipeline +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_training_pipeline(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrainingPipelineRequest( + name="name_value", + ) + + # Make the request + response = client.get_training_pipeline(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py new file mode 100644 index 0000000000..8318dc13d8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py new file mode 100644 index 0000000000..ec679372b3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- 
+# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPipelineJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_pipeline_jobs(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListPipelineJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_pipeline_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py new file mode 100644 index 0000000000..b0a84fb3a3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py 
@@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py new file mode 100644 index 0000000000..f69b7c0ca4 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrainingPipelines +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_training_pipelines(): + # Create a client + client = aiplatform_v1beta1.PipelineServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrainingPipelinesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_training_pipelines(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_async.py new file mode 100644 
index 0000000000..1abbae40a1 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_Explain_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_explain(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.explain(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_Explain_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_sync.py new file mode 100644 index 0000000000..aa821d19d4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_explain_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Explain +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_Explain_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_explain(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.ExplainRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.explain(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_Explain_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_async.py new file mode 100644 index 0000000000..38da7d92c4 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_Predict_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = await client.predict(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_Predict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_sync.py new file mode 100644 index 0000000000..8e0a96f195 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_predict_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for Predict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_Predict_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + instances = aiplatform_v1beta1.Value() + instances.null_value = "NULL_VALUE" + + request = aiplatform_v1beta1.PredictRequest( + endpoint="endpoint_value", + instances=instances, + ) + + # Make the request + response = client.predict(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_Predict_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py new file mode 100644 index 0000000000..b347682562 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_RawPredict_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_raw_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = await client.raw_predict(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_RawPredict_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py new file mode 100644 index 0000000000..20c7286fa0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RawPredict +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_RawPredict_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_raw_predict(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RawPredictRequest( + endpoint="endpoint_value", + ) + + # Make the request + response = client.raw_predict(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_RawPredict_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py new file mode 100644 index 0000000000..4110e3a04f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py new file mode 100644 index 0000000000..1569cc2f2a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + specialist_pool = aiplatform_v1beta1.SpecialistPool() + specialist_pool.name = "name_value" + specialist_pool.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateSpecialistPoolRequest( + parent="parent_value", + specialist_pool=specialist_pool, + ) + + # Make the request + operation = client.create_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py new file mode 100644 index 0000000000..6743cf3a70 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py new file mode 100644 index 0000000000..5e02eafb73 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_specialist_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py new file mode 100644 index 0000000000..a36e2e76aa --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_specialist_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py new file mode 100644 index 0000000000..360e6e7423 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_specialist_pool(): + # Create a client + client = aiplatform_v1beta1.SpecialistPoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetSpecialistPoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_specialist_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py new file mode 100644 index 0000000000..d35fe10b3f --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# Snippet for ListSpecialistPools (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async]
from google.cloud import aiplatform_v1beta1


async def sample_list_specialist_pools():
    """List every SpecialistPool under a parent location, page by page."""
    # Create a client
    client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()

    # Initialize request argument(s); `parent` is the location resource name.
    request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
        parent="parent_value",
    )

    # BUG FIX: the async client's list method is a coroutine that resolves to
    # an async pager, so the call must be awaited before iterating; without
    # `await`, `async for` would be applied to a bare coroutine and fail.
    page_result = await client.list_specialist_pools(request=request)

    # The pager transparently fetches subsequent pages during iteration.
    async for response in page_result:
        print(response)

# [END aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async]
# Snippet for ListSpecialistPools (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync]
from google.cloud import aiplatform_v1beta1


def sample_list_specialist_pools():
    """Print every SpecialistPool under the given parent location."""
    svc = aiplatform_v1beta1.SpecialistPoolServiceClient()

    # `parent` is the location resource name.
    list_request = aiplatform_v1beta1.ListSpecialistPoolsRequest(parent="parent_value")

    # The pager yields pools across pages transparently.
    for pool in svc.list_specialist_pools(request=list_request):
        print(pool)

# [END aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync]
# Snippet for UpdateSpecialistPool (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async]
from google.cloud import aiplatform_v1beta1


async def sample_update_specialist_pool():
    """Update a SpecialistPool and wait for the long-running operation."""
    # Create a client
    client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()

    # Initialize request argument(s): the pool fields to write.
    specialist_pool = aiplatform_v1beta1.SpecialistPool()
    specialist_pool.name = "name_value"
    specialist_pool.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateSpecialistPoolRequest(
        specialist_pool=specialist_pool,
    )

    # BUG FIX: the async client's LRO method is itself a coroutine; it must be
    # awaited to obtain the AsyncOperation handle before calling .result().
    operation = await client.update_specialist_pool(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is also a coroutine; await the final response.
    response = await operation.result()

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async]
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py new file mode 100644 index 0000000000..9e314cdd0a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSpecialistPool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
# Snippet for UpdateSpecialistPool (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync]
from google.cloud import aiplatform_v1beta1


def sample_update_specialist_pool():
    """Update a SpecialistPool and block until the LRO finishes."""
    svc = aiplatform_v1beta1.SpecialistPoolServiceClient()

    # Pool fields to write.
    pool = aiplatform_v1beta1.SpecialistPool()
    pool.name = "name_value"
    pool.display_name = "display_name_value"

    # Kick off the long-running update.
    lro = svc.update_specialist_pool(
        request=aiplatform_v1beta1.UpdateSpecialistPoolRequest(specialist_pool=pool)
    )

    print("Waiting for operation to complete...")

    # Block until the operation resolves, then show the updated pool.
    print(lro.result())

# [END aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync]
# Snippet for BatchCreateTensorboardRuns (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async]
from google.cloud import aiplatform_v1beta1


async def sample_batch_create_tensorboard_runs():
    """Create TensorBoard runs in bulk and print the batch response."""
    svc = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # One per-run sub-request. NOTE(review): `requests` is a repeated field —
    # confirm whether proto-plus accepts a bare message here or needs a list.
    run_req = aiplatform_v1beta1.CreateTensorboardRunRequest()
    run_req.parent = "parent_value"
    run_req.tensorboard_run.display_name = "display_name_value"
    run_req.tensorboard_run_id = "tensorboard_run_id_value"

    batch_req = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
        parent="parent_value",
        requests=run_req,
    )

    # Await the RPC and display the created runs.
    print(await svc.batch_create_tensorboard_runs(request=batch_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async]
# Snippet for BatchCreateTensorboardRuns (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync]
from google.cloud import aiplatform_v1beta1


def sample_batch_create_tensorboard_runs():
    """Create TensorBoard runs in bulk and print the batch response."""
    svc = aiplatform_v1beta1.TensorboardServiceClient()

    # One per-run sub-request. NOTE(review): `requests` is a repeated field —
    # confirm whether proto-plus accepts a bare message here or needs a list.
    run_req = aiplatform_v1beta1.CreateTensorboardRunRequest()
    run_req.parent = "parent_value"
    run_req.tensorboard_run.display_name = "display_name_value"
    run_req.tensorboard_run_id = "tensorboard_run_id_value"

    batch_req = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
        parent="parent_value",
        requests=run_req,
    )

    # Issue the RPC and display the created runs.
    print(svc.batch_create_tensorboard_runs(request=batch_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync]
new file mode 100644 index 0000000000..409e42451c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchCreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
# Snippet for BatchCreateTensorboardTimeSeries (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async]
from google.cloud import aiplatform_v1beta1


async def sample_batch_create_tensorboard_time_series():
    """Create TensorBoard time series in bulk and print the batch response."""
    svc = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # One per-series sub-request. NOTE(review): `requests` is a repeated
    # field — confirm whether a bare message (vs. a list) is accepted.
    series_req = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
    series_req.parent = "parent_value"
    series_req.tensorboard_time_series.display_name = "display_name_value"
    series_req.tensorboard_time_series.value_type = "BLOB_SEQUENCE"

    batch_req = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
        parent="parent_value",
        requests=series_req,
    )

    # Await the RPC and display the created series.
    print(await svc.batch_create_tensorboard_time_series(request=batch_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async]
# Snippet for BatchCreateTensorboardTimeSeries (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync]
from google.cloud import aiplatform_v1beta1


def sample_batch_create_tensorboard_time_series():
    """Create TensorBoard time series in bulk and print the batch response."""
    svc = aiplatform_v1beta1.TensorboardServiceClient()

    # One per-series sub-request. NOTE(review): `requests` is a repeated
    # field — confirm whether a bare message (vs. a list) is accepted.
    series_req = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
    series_req.parent = "parent_value"
    series_req.tensorboard_time_series.display_name = "display_name_value"
    series_req.tensorboard_time_series.value_type = "BLOB_SEQUENCE"

    batch_req = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
        parent="parent_value",
        requests=series_req,
    )

    # Issue the RPC and display the created series.
    print(svc.batch_create_tensorboard_time_series(request=batch_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync]
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..07ee0f2040 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchReadTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
# Snippet for BatchReadTensorboardTimeSeriesData (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async]
from google.cloud import aiplatform_v1beta1


async def sample_batch_read_tensorboard_time_series_data():
    """Read multiple TensorBoard time series in one call and print the data."""
    svc = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # `tensorboard` is the parent resource; `time_series` lists series IDs.
    read_req = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
        tensorboard="tensorboard_value",
        time_series=['time_series_value_1', 'time_series_value_2'],
    )

    # Await the RPC and display the result.
    print(await svc.batch_read_tensorboard_time_series_data(request=read_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async]
# Snippet for BatchReadTensorboardTimeSeriesData (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync]
from google.cloud import aiplatform_v1beta1


def sample_batch_read_tensorboard_time_series_data():
    """Read multiple TensorBoard time series in one call and print the data."""
    svc = aiplatform_v1beta1.TensorboardServiceClient()

    # `tensorboard` is the parent resource; `time_series` lists series IDs.
    read_req = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
        tensorboard="tensorboard_value",
        time_series=['time_series_value_1', 'time_series_value_2'],
    )

    # Issue the RPC and display the result.
    print(svc.batch_read_tensorboard_time_series_data(request=read_req))

# [END aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync]
# Snippet for CreateTensorboard (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async]
from google.cloud import aiplatform_v1beta1


async def sample_create_tensorboard():
    """Create a Tensorboard and wait for the long-running operation."""
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s): the Tensorboard to create.
    tensorboard = aiplatform_v1beta1.Tensorboard()
    tensorboard.display_name = "display_name_value"

    request = aiplatform_v1beta1.CreateTensorboardRequest(
        parent="parent_value",
        tensorboard=tensorboard,
    )

    # BUG FIX: the async client's LRO method is itself a coroutine; it must be
    # awaited to obtain the AsyncOperation handle before calling .result().
    operation = await client.create_tensorboard(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is also a coroutine; await the final response.
    response = await operation.result()

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async]
# Snippet for CreateTensorboardExperiment (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async]
from google.cloud import aiplatform_v1beta1


async def sample_create_tensorboard_experiment():
    """Create a TensorBoard experiment and print the created resource."""
    svc = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # `parent` is the Tensorboard resource; the ID names the new experiment.
    create_req = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
        parent="parent_value",
        tensorboard_experiment_id="tensorboard_experiment_id_value",
    )

    # Await the RPC and display the result.
    print(await svc.create_tensorboard_experiment(request=create_req))

# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async]
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..86b25f8d5a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
# Snippet for CreateTensorboardExperiment (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync]
from google.cloud import aiplatform_v1beta1


def sample_create_tensorboard_experiment():
    """Create a TensorBoard experiment and print the created resource."""
    svc = aiplatform_v1beta1.TensorboardServiceClient()

    # `parent` is the Tensorboard resource; the ID names the new experiment.
    create_req = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
        parent="parent_value",
        tensorboard_experiment_id="tensorboard_experiment_id_value",
    )

    # Issue the RPC and display the result.
    print(svc.create_tensorboard_experiment(request=create_req))

# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync]
# Snippet for CreateTensorboardRun (async).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async]
from google.cloud import aiplatform_v1beta1


async def sample_create_tensorboard_run():
    """Create a TensorBoard run and print the created resource."""
    svc = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Run payload to create.
    run = aiplatform_v1beta1.TensorboardRun()
    run.display_name = "display_name_value"

    create_req = aiplatform_v1beta1.CreateTensorboardRunRequest(
        parent="parent_value",
        tensorboard_run=run,
        tensorboard_run_id="tensorboard_run_id_value",
    )

    # Await the RPC and display the result.
    print(await svc.create_tensorboard_run(request=create_req))

# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async]
# Snippet for CreateTensorboardRun (sync).
# NOTE: Illustrative generated-style sample; may require modification for
# your environment.

# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync]
from google.cloud import aiplatform_v1beta1


def sample_create_tensorboard_run():
    """Create a TensorBoard run and print the created resource."""
    svc = aiplatform_v1beta1.TensorboardServiceClient()

    # Run payload to create.
    run = aiplatform_v1beta1.TensorboardRun()
    run.display_name = "display_name_value"

    create_req = aiplatform_v1beta1.CreateTensorboardRunRequest(
        parent="parent_value",
        tensorboard_run=run,
        tensorboard_run_id="tensorboard_run_id_value",
    )

    # Issue the RPC and display the result.
    print(svc.create_tensorboard_run(request=create_req))

# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync]
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard = aiplatform_v1beta1.Tensorboard() + tensorboard.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateTensorboardRequest( + parent="parent_value", + tensorboard=tensorboard, + ) + + # Make the request + operation = client.create_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync] diff --git 
a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py new file mode 100644 index 0000000000..27ac95a98c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = await client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..89f205c530 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries() + tensorboard_time_series.display_name = "display_name_value" + tensorboard_time_series.value_type = "BLOB_SEQUENCE" + + request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest( + parent="parent_value", + tensorboard_time_series=tensorboard_time_series, + ) + + # Make the request + response = client.create_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py new file mode 100644 index 0000000000..0fad128744 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py new file mode 100644 index 0000000000..6a00ba6551 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..05bb0f8e3d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for DeleteTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_experiment(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py new file mode 100644 index 0000000000..a9c126bdbf --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py new file mode 100644 index 0000000000..7de2b8038a --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRunRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_run(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py new file mode 100644 index 0000000000..e9181c8670 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py new file mode 100644 index 0000000000..1427937ce3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..97d5788320 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_tensorboard_time_series(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..9207970d60 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py new file mode 100644 index 0000000000..bad73717bc --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExportTensorboardTimeSeriesData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_export_tensorboard_time_series_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest( + tensorboard_time_series="tensorboard_time_series_value", + ) + + # Make the request + page_result = client.export_tensorboard_time_series_data(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py new file mode 100644 index 0000000000..76db628787 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py new file mode 100644 index 0000000000..59eaefa0a5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py new file mode 100644 index 0000000000..a4b0be05d6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardExperiment +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_experiment(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardExperimentRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_experiment(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py new file mode 100644 index 0000000000..3b6e6affb3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py new file mode 100644 index 0000000000..0a68e3959b --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardRun +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_run(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRunRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_run(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py new file mode 100644 index 0000000000..26a5a65230 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboard +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py new file mode 100644 index 0000000000..411c0af108 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..10c0d3f143 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 
2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest( + name="name_value", + ) + + # Make the request + response = client.get_tensorboard_time_series(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py new file mode 100644 index 0000000000..4373a11385 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py new file mode 100644 index 0000000000..022518a974 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardExperiments +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_experiments(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardExperimentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_experiments(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py new file mode 100644 index 0000000000..8c0b9f265e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py new file mode 100644 index 0000000000..1e3572ada3 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardRuns +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_runs(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardRunsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_runs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py new file mode 100644 index 0000000000..22dd43a6ce --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py new file mode 100644 index 0000000000..9e8deab9e0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboardTimeSeries +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboard_time_series(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboard_time_series(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py new file mode 100644 index 0000000000..3ddaf031a1 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py new file 
mode 100644 index 0000000000..9e1ff86334 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTensorboards +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_tensorboards(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTensorboardsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tensorboards(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py new file mode 100644 index 0000000000..4379e6a244 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = await client.read_tensorboard_blob_data(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py new file mode 100644 index 0000000000..ba91617037 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ReadTensorboardBlobData +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_read_tensorboard_blob_data(): + # Create a client + client = aiplatform_v1beta1.TensorboardServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest( + time_series="time_series_value", + ) + + # Make the request + stream = client.read_tensorboard_blob_data(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py new file mode 100644 index 0000000000..98979860f6 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# [START aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async]
from google.cloud import aiplatform_v1beta1


async def sample_read_tensorboard_time_series_data():
    """Read data points from a Tensorboard time series (async client).

    ``"tensorboard_time_series_value"`` is a placeholder for the full
    resource name of the TensorboardTimeSeries to read.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
        tensorboard_time_series="tensorboard_time_series_value",
    )

    # Make the request (unary RPC: a single response message)
    response = await client.read_tensorboard_time_series_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async]
# [START aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync]
from google.cloud import aiplatform_v1beta1


def sample_read_tensorboard_time_series_data():
    """Read data points from a Tensorboard time series (sync client).

    ``"tensorboard_time_series_value"`` is a placeholder for the full
    resource name of the TensorboardTimeSeries to read.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
        tensorboard_time_series="tensorboard_time_series_value",
    )

    # Make the request (unary RPC: a single response message)
    response = client.read_tensorboard_time_series_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async]
from google.cloud import aiplatform_v1beta1


async def sample_update_tensorboard():
    """Update a Tensorboard and wait for the long-running operation (async client).

    Builds a Tensorboard message with the required ``display_name`` set,
    issues the update, then awaits completion of the returned operation.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    tensorboard = aiplatform_v1beta1.Tensorboard()
    tensorboard.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateTensorboardRequest(
        tensorboard=tensorboard,
    )

    # Make the request.
    # Fix: on the async client this method is a coroutine; it must be
    # awaited to obtain the operation object. The original sample called
    # `operation.result()` directly on the un-awaited coroutine, which
    # would raise AttributeError at runtime.
    operation = await client.update_tensorboard(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself awaitable and resolves to the
    # operation's response message once the LRO finishes.
    response = await operation.result()

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async]
from google.cloud import aiplatform_v1beta1


async def sample_update_tensorboard_experiment():
    """Update a TensorboardExperiment (async client).

    NOTE(review): the request is constructed with no fields set; a real
    call presumably needs ``tensorboard_experiment`` (and an update mask)
    populated — confirm required fields against the API reference.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest(
    )

    # Make the request
    response = await client.update_tensorboard_experiment(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync]
from google.cloud import aiplatform_v1beta1


def sample_update_tensorboard_experiment():
    """Update a TensorboardExperiment (sync client).

    NOTE(review): the request is constructed with no fields set; a real
    call presumably needs ``tensorboard_experiment`` populated — confirm
    required fields against the API reference.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest(
    )

    # Make the request
    response = client.update_tensorboard_experiment(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async]
from google.cloud import aiplatform_v1beta1


async def sample_update_tensorboard_run():
    """Update a TensorboardRun (async client).

    Sets the required ``display_name`` on the run before issuing the
    update; all values shown are placeholders.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    tensorboard_run = aiplatform_v1beta1.TensorboardRun()
    tensorboard_run.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateTensorboardRunRequest(
        tensorboard_run=tensorboard_run,
    )

    # Make the request
    response = await client.update_tensorboard_run(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync]
from google.cloud import aiplatform_v1beta1


def sample_update_tensorboard_run():
    """Update a TensorboardRun (sync client).

    Sets the required ``display_name`` on the run before issuing the
    update; all values shown are placeholders.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    tensorboard_run = aiplatform_v1beta1.TensorboardRun()
    tensorboard_run.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateTensorboardRunRequest(
        tensorboard_run=tensorboard_run,
    )

    # Make the request
    response = client.update_tensorboard_run(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync]
from google.cloud import aiplatform_v1beta1


def sample_update_tensorboard():
    """Update a Tensorboard and wait for the long-running operation (sync client).

    The update RPC returns an operation; ``operation.result()`` blocks
    until the LRO completes and returns its response message.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    tensorboard = aiplatform_v1beta1.Tensorboard()
    tensorboard.display_name = "display_name_value"

    request = aiplatform_v1beta1.UpdateTensorboardRequest(
        tensorboard=tensorboard,
    )

    # Make the request
    operation = client.update_tensorboard(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async]
from google.cloud import aiplatform_v1beta1


async def sample_update_tensorboard_time_series():
    """Update a TensorboardTimeSeries (async client).

    Sets the required ``display_name`` and ``value_type`` fields;
    ``"BLOB_SEQUENCE"`` is one of the TensorboardTimeSeries.ValueType
    enum names, passed here as a string.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
    tensorboard_time_series.display_name = "display_name_value"
    tensorboard_time_series.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest(
        tensorboard_time_series=tensorboard_time_series,
    )

    # Make the request
    response = await client.update_tensorboard_time_series(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async]
# [START aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync]
from google.cloud import aiplatform_v1beta1


def sample_update_tensorboard_time_series():
    """Update a TensorboardTimeSeries (sync client).

    Sets the required ``display_name`` and ``value_type`` fields;
    ``"BLOB_SEQUENCE"`` is an enum name passed as a string.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
    tensorboard_time_series.display_name = "display_name_value"
    tensorboard_time_series.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest(
        tensorboard_time_series=tensorboard_time_series,
    )

    # Make the request
    response = client.update_tensorboard_time_series(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async]
from google.cloud import aiplatform_v1beta1


async def sample_write_tensorboard_experiment_data():
    """Write run data into a TensorboardExperiment (async client).

    NOTE(review): ``write_run_data_requests`` is a plural (repeated)
    field but a single message is assigned here; it may need to be
    wrapped in a list — confirm against the proto definition.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
    write_run_data_requests.tensorboard_run = "tensorboard_run_value"
    write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
    write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
        tensorboard_experiment="tensorboard_experiment_value",
        write_run_data_requests=write_run_data_requests,
    )

    # Make the request
    response = await client.write_tensorboard_experiment_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async]
# [START aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync]
from google.cloud import aiplatform_v1beta1


def sample_write_tensorboard_experiment_data():
    """Write run data into a TensorboardExperiment (sync client).

    NOTE(review): ``write_run_data_requests`` is a plural (repeated)
    field but a single message is assigned here; it may need to be
    wrapped in a list — confirm against the proto definition.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
    write_run_data_requests.tensorboard_run = "tensorboard_run_value"
    write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
    write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
        tensorboard_experiment="tensorboard_experiment_value",
        write_run_data_requests=write_run_data_requests,
    )

    # Make the request
    response = client.write_tensorboard_experiment_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync]
# [START aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async]
from google.cloud import aiplatform_v1beta1


async def sample_write_tensorboard_run_data():
    """Write time-series data points into a TensorboardRun (async client).

    NOTE(review): ``time_series_data`` looks like a repeated field but a
    single TimeSeriesData message is passed — confirm whether a list is
    required.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    time_series_data = aiplatform_v1beta1.TimeSeriesData()
    time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
    time_series_data.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
        tensorboard_run="tensorboard_run_value",
        time_series_data=time_series_data,
    )

    # Make the request
    response = await client.write_tensorboard_run_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async]
# [START aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync]
from google.cloud import aiplatform_v1beta1


def sample_write_tensorboard_run_data():
    """Write time-series data points into a TensorboardRun (sync client).

    NOTE(review): ``time_series_data`` looks like a repeated field but a
    single TimeSeriesData message is passed — confirm whether a list is
    required.
    """
    # Create a client
    client = aiplatform_v1beta1.TensorboardServiceClient()

    # Initialize request argument(s)
    time_series_data = aiplatform_v1beta1.TimeSeriesData()
    time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
    time_series_data.value_type = "BLOB_SEQUENCE"

    request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
        tensorboard_run="tensorboard_run_value",
        time_series_data=time_series_data,
    )

    # Make the request
    response = client.write_tensorboard_run_data(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync]
# [START aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_async]
from google.cloud import aiplatform_v1beta1


async def sample_add_trial_measurement():
    """Add a measurement to a Vizier trial (async client).

    ``"trial_name_value"`` is a placeholder for the full resource name
    of the trial to attach the measurement to.
    """
    # Create a client
    client = aiplatform_v1beta1.VizierServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.AddTrialMeasurementRequest(
        trial_name="trial_name_value",
    )

    # Make the request
    response = await client.add_trial_measurement(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_async]
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_add_trial_measurement(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.AddTrialMeasurementRequest( + trial_name="trial_name_value", + ) + + # Make the request + response = client.add_trial_measurement(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py new file mode 100644 index 0000000000..7fa5901eb7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py new file mode 100644 index 0000000000..8fc432fbd9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckTrialEarlyStoppingState +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_check_trial_early_stopping_state(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest( + trial_name="trial_name_value", + ) + + # Make the request + operation = client.check_trial_early_stopping_state(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py new file mode 100644 index 0000000000..30fc9af533 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CompleteTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_complete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.complete_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CompleteTrial_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py new file mode 100644 index 0000000000..d227e01d15 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CompleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CompleteTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_complete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CompleteTrialRequest( + name="name_value", + ) + + # Make the request + response = client.complete_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CompleteTrial_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_async.py new file mode 100644 index 0000000000..1a923afb48 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CreateStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = await client.create_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CreateStudy_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_sync.py new 
file mode 100644 index 0000000000..396f45c5be --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_study_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CreateStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + study = aiplatform_v1beta1.Study() + study.display_name = "display_name_value" + study.study_spec.metrics.metric_id = "metric_id_value" + study.study_spec.metrics.goal = "MINIMIZE" + study.study_spec.parameters.double_value_spec.min_value = 0.96 + study.study_spec.parameters.double_value_spec.max_value = 0.962 + study.study_spec.parameters.parameter_id = "parameter_id_value" + + request = aiplatform_v1beta1.CreateStudyRequest( + parent="parent_value", + study=study, + ) + + # Make the request + response = client.create_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CreateStudy_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_async.py new file mode 100644 index 0000000000..0fa2502f33 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CreateTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CreateTrial_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py new file mode 100644 index 0000000000..c776e0dcd2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_CreateTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_create_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateTrialRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_CreateTrial_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_async.py new file mode 100644 index 0000000000..7178b7ffe0 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_DeleteStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + await client.delete_study(request=request) + + +# [END aiplatform_v1beta1_generated_VizierService_DeleteStudy_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py new file mode 100644 index 0000000000..f259e8a0a5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_DeleteStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteStudyRequest( + name="name_value", + ) + + # Make the request + client.delete_study(request=request) + + +# [END aiplatform_v1beta1_generated_VizierService_DeleteStudy_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py new file mode 100644 index 0000000000..0e2b0638f8 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_DeleteTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + await client.delete_trial(request=request) + + +# [END aiplatform_v1beta1_generated_VizierService_DeleteTrial_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py new file mode 100644 index 0000000000..21fe7a82fc --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_DeleteTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteTrialRequest( + name="name_value", + ) + + # Make the request + client.delete_trial(request=request) + + +# [END aiplatform_v1beta1_generated_VizierService_DeleteTrial_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_async.py new file mode 100644 index 0000000000..ed514cada2 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_GetStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = await client.get_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_GetStudy_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_sync.py new file mode 100644 index 0000000000..dea4a50945 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_study_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_GetStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetStudyRequest( + name="name_value", + ) + + # Make the request + response = client.get_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_GetStudy_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_async.py new file mode 100644 index 0000000000..a8cef49ac7 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_GetTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.get_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_GetTrial_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py new file mode 100644 index 0000000000..b1c32464f9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_GetTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_get_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetTrialRequest( + name="name_value", + ) + + # Make the request + response = client.get_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_GetTrial_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py new file mode 100644 index 0000000000..5ea4f5d74d --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_optimal_trials(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py new file mode 100644 index 0000000000..90f9f409bc --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListOptimalTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_optimal_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListOptimalTrialsRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_optimal_trials(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_async.py new file mode 100644 index 0000000000..6040471485 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListStudies_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_studies(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListStudies_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py new file mode 100644 index 0000000000..bcd72071d5 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListStudies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListStudies_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_studies(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListStudiesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_studies(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListStudies_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_async.py new file mode 100644 index 0000000000..a86618e52c --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListTrials_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py new file mode 100644 index 0000000000..7d68763af9 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_ListTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_list_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListTrialsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_trials(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_ListTrials_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py new file mode 100644 index 0000000000..31d560c32e --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_LookupStudy_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_lookup_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = await client.lookup_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_LookupStudy_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py new file mode 100644 index 0000000000..458b8dfe43 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for LookupStudy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_LookupStudy_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_lookup_study(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.LookupStudyRequest( + parent="parent_value", + display_name="display_name_value", + ) + + # Make the request + response = client.lookup_study(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_LookupStudy_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py new file mode 100644 index 0000000000..ca540079db --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_StopTrial_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_stop_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = await client.stop_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_StopTrial_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py new file mode 100644 index 0000000000..8859749951 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StopTrial +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_StopTrial_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_stop_trial(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StopTrialRequest( + name="name_value", + ) + + # Make the request + response = client.stop_trial(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_StopTrial_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py new file mode 100644 index 0000000000..25c90f6119 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_SuggestTrials_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_suggest_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = await operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_SuggestTrials_async] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py new file mode 100644 index 0000000000..8d8b220145 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SuggestTrials +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VizierService_SuggestTrials_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_suggest_trials(): + # Create a client + client = aiplatform_v1beta1.VizierServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SuggestTrialsRequest( + parent="parent_value", + suggestion_count=1744, + client_id="client_id_value", + ) + + # Make the request + operation = client.suggest_trials(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VizierService_SuggestTrials_sync] diff --git a/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json b/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json new file mode 100644 index 0000000000..0d4c8a0532 --- /dev/null +++ b/owl-bot-staging/v1beta1/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json @@ -0,0 +1,31047 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": 
"google.cloud.aiplatform.v1beta1", + "version": "v1beta1" + } + ], + "language": "PYTHON", + "name": "google-cloud-aiplatform" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.create_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_CreateDataset_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_create_dataset_async.py" + }, + { + "canonical": 
true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.create_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "CreateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_dataset" + }, + "description": "Sample for CreateDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_CreateDataset_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_create_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.delete_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_DeleteDataset_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.delete_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + 
"shortName": "DeleteDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_dataset" + }, + "description": "Sample for DeleteDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_DeleteDataset_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_delete_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.export_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ExportData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": 
"google.cloud.aiplatform_v1beta1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "aiplatform_v1beta1_generated_dataset_service_export_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ExportData_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_export_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.export_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ExportData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ExportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "export_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportDataConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_data" + }, + "description": "Sample for ExportData", + "file": "aiplatform_v1beta1_generated_dataset_service_export_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ExportData_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_export_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": 
"aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.get_annotation_spec", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetAnnotationSpec" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AnnotationSpec", + "shortName": "get_annotation_spec" + }, + "description": "Sample for GetAnnotationSpec", + "file": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetAnnotationSpec_sync", + "segments": [ + { + "end": 
44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_annotation_spec_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.get_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetDataset_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.get_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.GetDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "GetDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDatasetRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "get_dataset" + }, + "description": "Sample for GetDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_GetDataset_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_get_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.import_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ImportData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "aiplatform_v1beta1_generated_dataset_service_import_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ImportData_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_import_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.import_data", + 
"method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ImportData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ImportData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportDataRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "import_configs", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_data" + }, + "description": "Sample for ImportData", + "file": "aiplatform_v1beta1_generated_dataset_service_import_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ImportData_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_import_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + 
"shortName": "DatasetService" + }, + "shortName": "ListAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager", + "shortName": "list_annotations" + }, + "description": "Sample for ListAnnotations", + "file": "aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListAnnotations_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_annotations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest" + }, + { + "name": "parent", + 
"type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager", + "shortName": "list_annotations" + }, + "description": "Sample for ListAnnotations", + "file": "aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListAnnotations_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_annotations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_data_items", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDataItems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager", + "shortName": "list_data_items" + }, + "description": "Sample for ListDataItems", + "file": "aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDataItems_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_data_items_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_data_items", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDataItems" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager", + "shortName": "list_data_items" + }, + "description": "Sample for ListDataItems", + "file": 
"aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDataItems_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_data_items_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.list_datasets", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDatasets_async", + "segments": [ 
+ { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_datasets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.list_datasets", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "ListDatasets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager", + "shortName": "list_datasets" + }, + "description": "Sample for ListDatasets", + "file": "aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_ListDatasets_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_list_datasets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient", + "shortName": "DatasetServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceAsyncClient.update_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_UpdateDataset_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + 
], + "title": "aiplatform_v1beta1_generated_dataset_service_update_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient", + "shortName": "DatasetServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.DatasetServiceClient.update_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.DatasetService", + "shortName": "DatasetService" + }, + "shortName": "UpdateDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest" + }, + { + "name": "dataset", + "type": "google.cloud.aiplatform_v1beta1.types.Dataset" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Dataset", + "shortName": "update_dataset" + }, + "description": "Sample for UpdateDataset", + "file": "aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_DatasetService_UpdateDataset_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_dataset_service_update_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + 
"client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_endpoint" + }, + "description": "Sample for CreateEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": 
"EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_endpoint" + }, + "description": "Sample for CreateEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_CreateEndpoint_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_create_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": 
"EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeleteEndpoint_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_delete_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + 
"type": "google.cloud.aiplatform_v1beta1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeployModel_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" 
+ }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_DeployModel_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_deploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": 
"get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_GetEndpoint_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_GetEndpoint_sync", + "segments": [ + { + 
"end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_get_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.list_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "ListEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager", + "shortName": "list_endpoints" + }, + "description": "Sample for ListEndpoints", + "file": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_ListEndpoints_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.list_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "ListEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager", + "shortName": "list_endpoints" + }, + "description": "Sample for ListEndpoints", + "file": "aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_endpoint_service_list_endpoints_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.undeploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_UndeployModel_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + 
"client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.undeploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UndeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "traffic_split", + "type": "Mapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_model" + }, + "description": "Sample for UndeployModel", + "file": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_UndeployModel_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_undeploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.EndpointServiceAsyncClient.update_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UpdateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EndpointServiceClient.update_endpoint", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "UpdateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.Endpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Endpoint", + "shortName": "update_endpoint" + }, + "description": "Sample for UpdateEndpoint", + "file": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EndpointService_UpdateEndpoint_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_endpoint_service_update_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" + }, + "description": "Sample for StreamingReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" + }, + "description": "Sample for StreamingReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": 
"FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_create_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_create_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_read_feature_values", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + 
"shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + 
"shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature" + }, + "description": "Sample for CreateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature" + }, + "description": "Sample for CreateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + }, + 
{ + "name": "parent", + "type": "str" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_featurestore" + }, + "description": "Sample for CreateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + 
{ + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_featurestore" + }, + "description": "Sample for CreateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": 
"bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_entity_type" + }, + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_entity_type" + }, + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync", + "segments": [ + { + "end": 48, + 
"start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_featurestore" + }, + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "DeleteFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_featurestore" + }, + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.export_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ExportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_feature_values" + }, + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.export_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ExportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_feature_values" + }, + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" + }, + "description": "Sample for GetEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_entity_type", + "method": { + 
"fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" + }, + "description": "Sample for GetEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + 
"shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" + }, + 
"description": "Sample for GetFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "GetFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" + }, + "description": "Sample for GetFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.import_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ImportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_feature_values" + }, + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + 
"end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.import_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ImportFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "import_feature_values" + }, + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, 
+ { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_entity_types", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListEntityTypes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", + "shortName": "list_entity_types" + }, + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], 
+ "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_entity_types", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListEntityTypes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager", + "shortName": "list_entity_types" + }, + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_featurestores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", + 
"service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "ListFeaturestores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", + "shortName": "list_featurestores" + }, + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_featurestores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": 
"ListFeaturestores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager", + "shortName": "list_featurestores" + }, + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.search_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "SearchFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" + 
}, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", + "shortName": "search_features" + }, + "description": "Sample for SearchFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.search_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "SearchFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" + }, + { + "name": "location", + "type": "str" + }, + { + "name": "query", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager", + "shortName": "search_features" + }, + "description": "Sample for SearchFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" + }, + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" + }, + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "update_feature" + }, + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + 
"shortName": "update_feature" + }, + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_featurestore" + }, + 
"description": "Sample for UpdateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_featurestore", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "UpdateFeaturestore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" + }, + { + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_featurestore" + }, + "description": "Sample for UpdateFeaturestore", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.create_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "CreateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index_endpoint" + }, + "description": "Sample for CreateIndexEndpoint", + "file": 
"aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.create_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "CreateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index_endpoint" + }, + "description": "Sample for CreateIndexEndpoint", + "file": 
"aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.delete_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeleteIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index_endpoint" + }, + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.delete_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeleteIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index_endpoint" + }, + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.deploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_index" + }, + "description": "Sample for DeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.deploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "DeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_index" + }, + "description": "Sample for DeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.get_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "GetIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" + }, + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.get_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "GetIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" + }, + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.list_index_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "ListIndexEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" + }, + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + 
"fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.list_index_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "ListIndexEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" + }, + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.mutate_deployed_index", + 
"method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "MutateDeployedIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "mutate_deployed_index" + }, + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.mutate_deployed_index", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "MutateDeployedIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "mutate_deployed_index" + }, + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.undeploy_index", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UndeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_index" + }, + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.undeploy_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + 
"shortName": "IndexEndpointService" + }, + "shortName": "UndeployIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" + }, + { + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_index" + }, + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.update_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UpdateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" + }, + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.update_index_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" + }, + "shortName": "UpdateIndexEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" + }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" + }, + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.create_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "CreateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + }, + { + 
"name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index" + }, + "description": "Sample for CreateIndex", + "file": "aiplatform_v1beta1_generated_index_service_create_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_create_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.create_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "CreateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, 
+ { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index" + }, + "description": "Sample for CreateIndex", + "file": "aiplatform_v1beta1_generated_index_service_create_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_create_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.delete_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "DeleteIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_index" + }, + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1beta1_generated_index_service_delete_index_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_delete_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.delete_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "DeleteIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_index" + }, + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + 
"start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.get_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "GetIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", + "file": "aiplatform_v1beta1_generated_index_service_get_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_get_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + 
"client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.get_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "GetIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" + }, + "description": "Sample for GetIndex", + "file": "aiplatform_v1beta1_generated_index_service_get_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_get_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.list_indexes", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "ListIndexes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" + }, + "description": "Sample for ListIndexes", + "file": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.list_indexes", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "ListIndexes" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" + }, + "description": "Sample for ListIndexes", + "file": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.update_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "UpdateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", 
+ "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_index" + }, + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1beta1_generated_index_service_update_index_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_update_index_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.update_index", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" + }, + "shortName": "UpdateIndex" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_index" + }, + "description": "Sample for UpdateIndex", + "file": 
"aiplatform_v1beta1_generated_index_service_update_index_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_index_service_update_index_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" + }, + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + 
}, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_batch_prediction_job" + }, + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + 
"title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" + }, + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_custom_job", + "method": { + 
"fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_custom_job" + }, + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + }, + 
{ + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" + }, + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_data_labeling_job" + }, + "description": "Sample for CancelDataLabelingJob", + "file": 
"aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" + }, + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async", + "segments": [ + { + "end": 
42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CancelHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_hyperparameter_tuning_job" + }, + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" + }, + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" + }, + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" + }, + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "custom_job", + "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" + }, + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" + }, + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_data_labeling_job", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" + }, + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", + "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" + }, + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" + }, + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", + "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" + }, + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_model_deployment_monitoring_job", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "CreateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" + }, + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_batch_prediction_job" + }, + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_batch_prediction_job" + }, + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_custom_job" + }, + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.api_core.operation.Operation", + "shortName": "delete_custom_job" + }, + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" + }, + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" + }, + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" 
+ }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_hyperparameter_tuning_job" + }, + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + 
{ + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_hyperparameter_tuning_job" + }, + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_deployment_monitoring_job" + }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py" + }, + { 
+ "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "DeleteModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_deployment_monitoring_job" + }, + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_batch_prediction_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" + }, + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_batch_prediction_job", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetBatchPredictionJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" + }, + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetCustomJob" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" + }, + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_custom_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetCustomJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" + }, + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" + }, + "description": "Sample for GetDataLabelingJob", + "file": 
"aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_data_labeling_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetDataLabelingJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" + }, + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + 
"type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" + }, + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, 
+ "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_hyperparameter_tuning_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetHyperparameterTuningJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" + }, + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } 
+ ], + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" + }, + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py" + 
}, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "GetModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" + }, + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_batch_prediction_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListBatchPredictionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" + }, + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient.list_batch_prediction_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListBatchPredictionJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" + }, + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_custom_jobs", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListCustomJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" + }, + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_custom_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListCustomJobs" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" + }, + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_data_labeling_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListDataLabelingJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" 
+ }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" + }, + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_data_labeling_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListDataLabelingJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": 
"list_data_labeling_jobs" + }, + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListHyperparameterTuningJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" + }, + "description": "Sample for ListHyperparameterTuningJobs", + "file": 
"aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_hyperparameter_tuning_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListHyperparameterTuningJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" + }, + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListModelDeploymentMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" + }, + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_model_deployment_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ListModelDeploymentMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" + }, + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "PauseModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" + }, + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, 
+ "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.pause_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "PauseModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "pause_model_deployment_monitoring_job" + }, + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" 
+ }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ResumeModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" + }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py" + }, + { + "canonical": 
true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.resume_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "ResumeModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "resume_model_deployment_monitoring_job" + }, + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" + }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", + "segments": [ + { + "end": 46, + "start": 27, + "type": "FULL" + }, + { + "end": 46, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 47, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" + }, + { + "canonical": true, + "clientMethod": 
{ + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" + }, + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", + "segments": [ + { + "end": 46, + "start": 27, + "type": "FULL" + }, + { + "end": 46, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 47, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.update_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "UpdateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" + }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.update_model_deployment_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" + }, + "shortName": "UpdateModelDeploymentMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" + }, + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + 
{ + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextArtifactsAndExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" + }, + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_artifacts_and_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextArtifactsAndExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "artifacts", + "type": "Sequence[str]" + }, + { + "name": "executions", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" + }, + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" + }, + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, 
+ "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" + }, + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 
42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_execution_events", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddExecutionEvents" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" + }, + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_execution_events", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "AddExecutionEvents" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "events", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" + }, + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py" + }, + { + "canonical": true, + 
"clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "create_artifact" + }, + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "create_artifact" + }, + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + 
"fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" + }, + "description": "Sample for CreateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_context", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" + }, + "description": "Sample for CreateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" + }, + "description": "Sample for CreateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": 
"CreateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" + }, + "description": "Sample for CreateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataSchema" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" + }, + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" + }, + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_metadata_store" + }, + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "CreateMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_metadata_store" + }, + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" 
+ }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" + }, + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "delete_artifact" + }, + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" + }, + "description": "Sample for DeleteContext", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" + }, + "description": "Sample for DeleteContext", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + 
}, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" + }, + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" + }, + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + 
}, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" + }, + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", + "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "DeleteMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" + }, + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetArtifact" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" + }, + "description": "Sample for GetArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" + }, + "description": "Sample for GetArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" + }, + "description": "Sample for GetContext", + "file": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", 
+ "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" + }, + "description": "Sample for GetContext", + "file": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + 
"type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" + }, + "description": "Sample for GetExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py" + 
}, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" + }, + "description": "Sample for GetExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" + }, + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_schema", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataSchema" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" + }, + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" + }, + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "GetMetadataStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" + }, + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" + }, + "description": 
"Sample for ListArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager", + "shortName": "list_artifacts" + }, + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" + }, + "description": "Sample for ListContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + 
"start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" + }, + "description": "Sample for ListContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" 
+ } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" + }, + "description": "Sample for ListExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" + }, + "description": "Sample for ListExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_schemas", + 
"method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataSchemas" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" + }, + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_schemas", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataSchemas" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" + }, + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataStores" + }, + "parameters": [ + 
{ + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "ListMetadataStores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" + }, + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "purge_artifacts" + }, + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_artifacts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeArtifacts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" + }, + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" + }, + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_contexts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeContexts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" + }, + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" + }, + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": 
"MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_executions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "PurgeExecutions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" + }, + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", + 
"service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryArtifactLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" + }, + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_artifact_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + 
"shortName": "QueryArtifactLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" + }, + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_context_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryContextLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" + }, + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_context_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryContextLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + }, + { + "name": "context", + "type": "str" + }, + 
{ + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" + }, + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryExecutionInputsAndOutputs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + 
"type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" + }, + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_execution_inputs_and_outputs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryExecutionInputsAndOutputs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + }, + { + "name": "execution", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" + }, + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for 
UpdateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync", + "segments": [ + { + "end": 
43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, 
+ { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 
55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py" + }, + { + "canonical": 
true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_version" + }, + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_version" + }, + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + 
"shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + 
"type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.export_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" 
+ }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_export_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.export_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_ModelService_ExportModel_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py" + }, + { + "canonical": true, + 
"clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model", + 
"method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1beta1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" + }, + { + "name": 
"name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1beta1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample 
for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async", + "segments": [ + { + 
"end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + 
"start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager", + "shortName": "list_model_versions" + }, + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], 
+ "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager", + "shortName": "list_model_versions" + }, + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_models", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_models", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", + "service": { + 
"fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" + }, + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_list_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.merge_version_aliases", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" + }, + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.merge_version_aliases", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "MergeVersionAliases" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "version_aliases", + "type": "Sequence[str]" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" + }, + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_explanation_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateExplanationDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "update_explanation_dataset" + }, + "description": "Sample for UpdateExplanationDataset", + "file": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_explanation_dataset", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateExplanationDataset" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_explanation_dataset" + }, + "description": "Sample for UpdateExplanationDataset", + "file": 
"aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + "file": "aiplatform_v1beta1_generated_model_service_update_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_update_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UpdateModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" + }, + "description": "Sample for UpdateModel", + "file": "aiplatform_v1beta1_generated_model_service_update_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + 
{ + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_update_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.upload_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UploadModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" + }, + "description": "Sample for UploadModel", + "file": "aiplatform_v1beta1_generated_model_service_upload_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, 
+ { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_upload_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.upload_model", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" + }, + "shortName": "UploadModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" + }, + "description": "Sample for UploadModel", + "file": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" + }, + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_pipeline_job" + }, + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": 
"str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" + }, + "description": "Sample for CancelTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CancelTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "cancel_training_pipeline" + }, + "description": "Sample for 
CancelTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreatePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" + }, + "description": "Sample for CreatePipelineJob", + "file": 
"aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreatePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" + }, + "description": "Sample for CreatePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreateTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" + }, + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "CreateTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" + }, + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeletePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_pipeline_job" + }, + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { 
+ "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeletePipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_pipeline_job" + }, + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeleteTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_training_pipeline" + }, + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": 
{ + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "DeleteTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_training_pipeline" + }, + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" + }, + "description": "Sample for GetPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_pipeline_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + 
"shortName": "PipelineService" + }, + "shortName": "GetPipelineJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" + }, + "description": "Sample for GetPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", 
+ "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" + }, + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_training_pipeline", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "GetTrainingPipeline" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" + }, + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_pipeline_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListPipelineJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", + "shortName": "list_pipeline_jobs" + }, + "description": "Sample for 
ListPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_pipeline_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListPipelineJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager", + "shortName": "list_pipeline_jobs" + }, + "description": "Sample for ListPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_training_pipelines", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListTrainingPipelines" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", + "shortName": "list_training_pipelines" + }, + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async", + "segments": [ + { + "end": 45, + "start": 27, + 
"type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_training_pipelines", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" + }, + "shortName": "ListTrainingPipelines" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager", + "shortName": "list_training_pipelines" + }, + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 
38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.explain", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Explain" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" + }, + "description": "Sample for Explain", + "file": "aiplatform_v1beta1_generated_prediction_service_explain_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_explain_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.explain", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Explain" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" + }, + "description": "Sample for Explain", + "file": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.predict", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "aiplatform_v1beta1_generated_prediction_service_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_prediction_service_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.predict", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "Predict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "Sequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" + }, + "description": "Sample for Predict", + "file": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { 
+ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.raw_predict", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "RawPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" + }, + "description": "Sample for RawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.raw_predict", + 
"method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "RawPredict" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" + }, + "description": "Sample for RawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.create_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "CreateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_specialist_pool" + }, + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.create_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "CreateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_specialist_pool" + }, + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "DeleteSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_specialist_pool" + }, + "description": "Sample for DeleteSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.delete_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "DeleteSpecialistPool" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_specialist_pool" + }, + "description": "Sample for DeleteSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.get_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "GetSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + 
"type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" + }, + "description": "Sample for GetSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.get_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "GetSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" + }, + "description": "Sample for GetSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.list_specialist_pools", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "ListSpecialistPools" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", + "shortName": "list_specialist_pools" + }, + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.list_specialist_pools", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "ListSpecialistPools" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", + 
"shortName": "list_specialist_pools" + }, + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.update_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "UpdateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "update_specialist_pool" + }, + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.update_specialist_pool", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" + }, + "shortName": "UpdateSpecialistPool" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" + }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "update_specialist_pool" + }, + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" + }, + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + 
], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" + }, + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" + }, + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchCreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" + }, + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" + }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "BatchReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard", + "type": "str" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" + }, + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": 
"tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" + }, + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" + }, + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": 
"CreateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" + }, + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": 
"CreateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" + }, + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": 
"TensorboardService" + }, + "shortName": "CreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" + }, + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + 
"shortName": "TensorboardService" + }, + "shortName": "CreateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" + }, + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "CreateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_tensorboard" + }, + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": 
"CreateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_tensorboard" + }, + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_experiment" + }, + "description": "Sample for DeleteTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + }, + { + "name": "name", + "type": 
"str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_experiment" + }, + "description": "Sample for DeleteTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + 
"name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_run" + }, + "description": "Sample for DeleteTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_run" + }, + "description": 
"Sample for DeleteTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_time_series" + }, + "description": "Sample for DeleteTensorboardTimeSeries", + "file": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_time_series" + }, + "description": "Sample for DeleteTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard" + }, + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": 
"FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "DeleteTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard" + }, + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + 
"start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ExportTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", + "shortName": "export_tensorboard_time_series_data" + }, + "description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + 
{ + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.export_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ExportTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", + "shortName": "export_tensorboard_time_series_data" + }, + "description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + 
"end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" + }, + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" + }, + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" + }, 
+ { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" + }, + "description": "Sample for GetTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" 
+ }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" + }, + "description": "Sample for GetTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_time_series", + "method": { + "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" + }, + "description": "Sample for GetTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" + }, + "description": "Sample for GetTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboard" 
+ }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" + }, + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "GetTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" + }, + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_experiments", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardExperiments" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", + "shortName": "list_tensorboard_experiments" + }, + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_experiments", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardExperiments" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", + "shortName": "list_tensorboard_experiments" + }, + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", + "shortName": "list_tensorboard_runs" + }, + "description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_runs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardRuns" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager", + "shortName": "list_tensorboard_runs" + }, + 
"description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", + "shortName": "list_tensorboard_time_series" + }, + "description": "Sample for ListTensorboardTimeSeries", + "file": 
"aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", + "shortName": "list_tensorboard_time_series" + }, + "description": "Sample for ListTensorboardTimeSeries", + "file": 
"aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboards", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboards" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", + "shortName": "list_tensorboards" + }, + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboards", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ListTensorboards" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager", + "shortName": "list_tensorboards" + }, + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + 
"end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardBlobData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" + }, + "description": "Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_blob_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardBlobData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + }, + { + "name": "time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" + }, + "description": "Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, 
+ "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" + }, + "description": "Sample for ReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_time_series_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "ReadTensorboardTimeSeriesData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + }, + { + "name": "tensorboard_time_series", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" + }, + "description": "Sample for ReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + 
"start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" + }, + "description": "Sample for UpdateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, 
+ { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_experiment", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardExperiment" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + }, + { + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" + }, + "description": "Sample for UpdateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync", + "segments": [ + { + "end": 43, + "start": 27, + "type": "FULL" + }, + { + "end": 43, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 37, + 
"start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 40, + "start": 38, + "type": "REQUEST_EXECUTION" + }, + { + "end": 44, + "start": 41, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" + }, + "description": "Sample for UpdateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 
41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_run", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardRun" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + }, + { + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" + }, + "description": "Sample for UpdateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync", + "segments": [ + { + "end": 47, + "start": 27, + "type": "FULL" + }, + { + "end": 47, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 44, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 48, + "start": 45, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" + }, + "description": "Sample for UpdateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_time_series", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboardTimeSeries" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + }, + { + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" + }, + "description": "Sample for UpdateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + 
"type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 42, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 43, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_tensorboard" + }, + "description": "Sample for UpdateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 
33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "UpdateTensorboard" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" + }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_tensorboard" + }, + "description": "Sample for UpdateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 41, + "start": 34, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 42, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardExperimentData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" + }, + "description": "Sample for WriteTensorboardExperimentData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" 
+ }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_experiment_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardExperimentData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + }, + { + "name": "tensorboard_experiment", + "type": "str" + }, + { + "name": "write_run_data_requests", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" + }, + "description": "Sample for WriteTensorboardExperimentData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync", + "segments": [ + { + "end": 
50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_run_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardRunData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" + }, + "description": "Sample for WriteTensorboardRunData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_run_data", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" + }, + "shortName": "WriteTensorboardRunData" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + }, + { + "name": "tensorboard_run", + "type": "str" + }, + { + "name": "time_series_data", + "type": "Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" + }, + "description": "Sample for WriteTensorboardRunData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 43, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 46, + "start": 44, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "start": 47, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.add_trial_measurement", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "AddTrialMeasurement" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "add_trial_measurement" + }, + "description": "Sample for AddTrialMeasurement", + "file": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.add_trial_measurement", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "AddTrialMeasurement" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "add_trial_measurement" + }, + "description": "Sample for AddTrialMeasurement", + "file": "aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_AddTrialMeasurement_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_vizier_service_add_trial_measurement_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.check_trial_early_stopping_state", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CheckTrialEarlyStoppingState" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "check_trial_early_stopping_state" + }, + "description": "Sample for CheckTrialEarlyStoppingState", + "file": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_async", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.check_trial_early_stopping_state", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CheckTrialEarlyStoppingState" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "check_trial_early_stopping_state" + }, + "description": "Sample for CheckTrialEarlyStoppingState", + "file": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CheckTrialEarlyStoppingState_sync", + "segments": [ + { + "end": 48, + "start": 27, + "type": "FULL" + }, + { + "end": 48, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 45, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 49, + "start": 46, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_check_trial_early_stopping_state_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.complete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CompleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "complete_trial" + }, + "description": "Sample for CompleteTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CompleteTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_complete_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.complete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CompleteTrial" + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "complete_trial" + }, + "description": "Sample for CompleteTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CompleteTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_complete_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.create_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1beta1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + 
"type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "create_study" + }, + "description": "Sample for CreateStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_create_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateStudy_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.create_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "study", + "type": "google.cloud.aiplatform_v1beta1.types.Study" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "create_study" + }, + "description": "Sample for CreateStudy", 
+ "file": "aiplatform_v1beta1_generated_vizier_service_create_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateStudy_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.create_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1beta1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "create_trial" + }, + "description": "Sample for CreateTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_create_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_VizierService_CreateTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.create_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.CreateTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "CreateTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrialRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "trial", + "type": "google.cloud.aiplatform_v1beta1.types.Trial" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "create_trial" + }, + "description": "Sample for CreateTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_CreateTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_create_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.delete_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" + }, + "description": "Sample for DeleteStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_delete_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteStudy_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": 
{ + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.delete_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_study" + }, + "description": "Sample for DeleteStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteStudy_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.delete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": 
"VizierService" + }, + "shortName": "DeleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_trial" + }, + "description": "Sample for DeleteTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteTrial_async", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.delete_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "DeleteTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"shortName": "delete_trial" + }, + "description": "Sample for DeleteTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_DeleteTrial_sync", + "segments": [ + { + "end": 42, + "start": 27, + "type": "FULL" + }, + { + "end": 42, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_delete_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.get_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "get_study" + }, + "description": "Sample for GetStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_get_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_GetStudy_async", + "segments": [ + { + "end": 44, + "start": 27, + 
"type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.get_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetStudyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "get_study" + }, + "description": "Sample for GetStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_get_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_GetStudy_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + 
} + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.get_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "get_trial" + }, + "description": "Sample for GetTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_get_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_GetTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.VizierServiceClient.get_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.GetTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "GetTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetTrialRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "get_trial" + }, + "description": "Sample for GetTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_GetTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_get_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_optimal_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": 
"ListOptimalTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" + }, + "description": "Sample for ListOptimalTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_optimal_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListOptimalTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse", + "shortName": "list_optimal_trials" + }, + "description": "Sample for ListOptimalTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListOptimalTrials_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_optimal_trials_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_studies", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListStudies", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListStudies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager", + "shortName": "list_studies" + }, + "description": "Sample for ListStudies", + "file": "aiplatform_v1beta1_generated_vizier_service_list_studies_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListStudies_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_studies_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_studies", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListStudies", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListStudies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListStudiesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager", + "shortName": "list_studies" + }, + "description": "Sample for ListStudies", + "file": "aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListStudies_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_studies_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.list_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager", + "shortName": "list_trials" + }, + "description": "Sample for ListTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_list_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListTrials_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_list_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.list_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.ListTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "ListTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListTrialsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager", + "shortName": "list_trials" + }, + "description": "Sample for ListTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_ListTrials_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_vizier_service_list_trials_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.lookup_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.LookupStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "LookupStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "lookup_study" + }, + "description": "Sample for LookupStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_LookupStudy_async", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_lookup_study_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.VizierServiceClient.lookup_study", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.LookupStudy", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "LookupStudy" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.LookupStudyRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Study", + "shortName": "lookup_study" + }, + "description": "Sample for LookupStudy", + "file": "aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_LookupStudy_sync", + "segments": [ + { + "end": 45, + "start": 27, + "type": "FULL" + }, + { + "end": 45, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 39, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 42, + "start": 40, + "type": "REQUEST_EXECUTION" + }, + { + "end": 46, + "start": 43, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_lookup_study_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.stop_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.StopTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": 
"StopTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "stop_trial" + }, + "description": "Sample for StopTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_StopTrial_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_stop_trial_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.stop_trial", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.StopTrial", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "StopTrial" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StopTrialRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.types.Trial", + "shortName": "stop_trial" + }, + "description": "Sample for StopTrial", + "file": "aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_StopTrial_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_stop_trial_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient", + "shortName": "VizierServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceAsyncClient.suggest_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "SuggestTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "suggest_trials" + }, + "description": "Sample for SuggestTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1beta1_generated_VizierService_SuggestTrials_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 40, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient", + "shortName": "VizierServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.VizierServiceClient.suggest_trials", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.VizierService", + "shortName": "VizierService" + }, + "shortName": "SuggestTrials" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "suggest_trials" + }, + "description": "Sample for SuggestTrials", + "file": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_VizierService_SuggestTrials_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 40, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + 
"start": 41, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_vizier_service_suggest_trials_sync.py" + } + ] +} diff --git a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py new file mode 100644 index 0000000000..0b63343a02 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py @@ -0,0 +1,364 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Un-flatten aiplatform v1beta1 client method calls.

Walks an input directory of Python sources and copies them to an output
directory, rewriting flattened positional/keyword arguments of known API
methods into a single ``request`` dict argument, with the
``retry``/``timeout``/``metadata`` control parameters kept as keywords.
"""
import argparse
import os
import pathlib
import sys
from typing import Any, Callable, Dict, List, Sequence, Tuple

import libcst as cst


def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any],
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns ``(true_list, false_list)``: the elements for which *predicate*
    is truthy, then the rest, each preserving original order.
    """
    results: Tuple[List[Any], List[Any]] = ([], [])

    for item in iterator:
        results[int(predicate(item))].append(item)

    # Returns trueList, falseList
    return results[1], results[0]


class aiplatformCallTransformer(cst.CSTTransformer):
    # NOTE: generated class name (lowercase prefix) kept for compatibility.

    # RPC control parameters: these configure the call itself and are kept
    # as keyword arguments rather than folded into the request dict.
    # Fixed: ``Tuple[str]`` would mean a 1-tuple; variadic is ``Tuple[str, ...]``.
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')

    # Flattened parameter order for each API method, used to map positional
    # arguments back onto request-message field names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
        'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ),
        'add_context_children': ('context', 'child_contexts', ),
        'add_execution_events': ('execution', 'events', ),
        'add_trial_measurement': ('trial_name', 'measurement', ),
        'batch_create_features': ('parent', 'requests', ),
        'batch_create_tensorboard_runs': ('parent', 'requests', ),
        'batch_create_tensorboard_time_series': ('parent', 'requests', ),
        'batch_migrate_resources': ('parent', 'migrate_resource_requests', ),
        'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', 'bigquery_read_instances', 'pass_through_fields', ),
        'batch_read_tensorboard_time_series_data': ('tensorboard', 'time_series', ),
        'cancel_batch_prediction_job': ('name', ),
        'cancel_custom_job': ('name', ),
        'cancel_data_labeling_job': ('name', ),
        'cancel_hyperparameter_tuning_job': ('name', ),
        'cancel_pipeline_job': ('name', ),
        'cancel_training_pipeline': ('name', ),
        'check_trial_early_stopping_state': ('trial_name', ),
        'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ),
        'create_artifact': ('parent', 'artifact', 'artifact_id', ),
        'create_batch_prediction_job': ('parent', 'batch_prediction_job', ),
        'create_context': ('parent', 'context', 'context_id', ),
        'create_custom_job': ('parent', 'custom_job', ),
        'create_data_labeling_job': ('parent', 'data_labeling_job', ),
        'create_dataset': ('parent', 'dataset', ),
        'create_endpoint': ('parent', 'endpoint', 'endpoint_id', ),
        'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ),
        'create_execution': ('parent', 'execution', 'execution_id', ),
        'create_feature': ('parent', 'feature', 'feature_id', ),
        'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ),
        'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ),
        'create_index': ('parent', 'index', ),
        'create_index_endpoint': ('parent', 'index_endpoint', ),
        'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ),
        'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ),
        'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ),
        'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ),
        'create_specialist_pool': ('parent', 'specialist_pool', ),
        'create_study': ('parent', 'study', ),
        'create_tensorboard': ('parent', 'tensorboard', ),
        'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ),
        'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ),
        'create_tensorboard_time_series': ('parent', 'tensorboard_time_series', 'tensorboard_time_series_id', ),
        'create_training_pipeline': ('parent', 'training_pipeline', ),
        'create_trial': ('parent', 'trial', ),
        'delete_artifact': ('name', 'etag', ),
        'delete_batch_prediction_job': ('name', ),
        'delete_context': ('name', 'force', 'etag', ),
        'delete_custom_job': ('name', ),
        'delete_data_labeling_job': ('name', ),
        'delete_dataset': ('name', ),
        'delete_endpoint': ('name', ),
        'delete_entity_type': ('name', 'force', ),
        'delete_execution': ('name', 'etag', ),
        'delete_feature': ('name', ),
        'delete_featurestore': ('name', 'force', ),
        'delete_hyperparameter_tuning_job': ('name', ),
        'delete_index': ('name', ),
        'delete_index_endpoint': ('name', ),
        'delete_metadata_store': ('name', 'force', ),
        'delete_model': ('name', ),
        'delete_model_deployment_monitoring_job': ('name', ),
        'delete_model_version': ('name', ),
        'delete_pipeline_job': ('name', ),
        'delete_specialist_pool': ('name', 'force', ),
        'delete_study': ('name', ),
        'delete_tensorboard': ('name', ),
        'delete_tensorboard_experiment': ('name', ),
        'delete_tensorboard_run': ('name', ),
        'delete_tensorboard_time_series': ('name', ),
        'delete_training_pipeline': ('name', ),
        'delete_trial': ('name', ),
        'deploy_index': ('index_endpoint', 'deployed_index', ),
        'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ),
        'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ),
        'export_data': ('name', 'export_config', ),
        'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'full_export', 'settings', ),
        'export_model': ('name', 'output_config', ),
        'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ),
        'get_annotation_spec': ('name', 'read_mask', ),
        'get_artifact': ('name', ),
        'get_batch_prediction_job': ('name', ),
        'get_context': ('name', ),
        'get_custom_job': ('name', ),
        'get_data_labeling_job': ('name', ),
        'get_dataset': ('name', 'read_mask', ),
        'get_endpoint': ('name', ),
        'get_entity_type': ('name', ),
        'get_execution': ('name', ),
        'get_feature': ('name', ),
        'get_featurestore': ('name', ),
        'get_hyperparameter_tuning_job': ('name', ),
        'get_index': ('name', ),
        'get_index_endpoint': ('name', ),
        'get_metadata_schema': ('name', ),
        'get_metadata_store': ('name', ),
        'get_model': ('name', ),
        'get_model_deployment_monitoring_job': ('name', ),
        'get_model_evaluation': ('name', ),
        'get_model_evaluation_slice': ('name', ),
        'get_pipeline_job': ('name', ),
        'get_specialist_pool': ('name', ),
        'get_study': ('name', ),
        'get_tensorboard': ('name', ),
        'get_tensorboard_experiment': ('name', ),
        'get_tensorboard_run': ('name', ),
        'get_tensorboard_time_series': ('name', ),
        'get_training_pipeline': ('name', ),
        'get_trial': ('name', ),
        'import_data': ('name', 'import_configs', ),
        'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', 'disable_ingestion_analysis', ),
        'import_model_evaluation': ('parent', 'model_evaluation', ),
        'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ),
        'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ),
        'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ),
        'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ),
        'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_entity_types': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_executions': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_features': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ),
        'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_index_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_metadata_stores': ('parent', 'page_size', 'page_token', ),
        'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_model_versions': ('name', 'page_size', 'page_token', 'filter', 'read_mask', ),
        'list_optimal_trials': ('parent', ),
        'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', ),
        'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ),
        'list_studies': ('parent', 'page_token', 'page_size', ),
        'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ),
        'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_trials': ('parent', 'page_token', 'page_size', ),
        'lookup_study': ('parent', 'display_name', ),
        'merge_version_aliases': ('name', 'version_aliases', ),
        'mutate_deployed_index': ('index_endpoint', 'deployed_index', ),
        'pause_model_deployment_monitoring_job': ('name', ),
        'predict': ('endpoint', 'instances', 'parameters', ),
        'purge_artifacts': ('parent', 'filter', 'force', ),
        'purge_contexts': ('parent', 'filter', 'force', ),
        'purge_executions': ('parent', 'filter', 'force', ),
        'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ),
        'query_context_lineage_subgraph': ('context', ),
        'query_execution_inputs_and_outputs': ('execution', ),
        'raw_predict': ('endpoint', 'http_body', ),
        'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ),
        'read_tensorboard_blob_data': ('time_series', 'blob_ids', ),
        'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ),
        'resume_model_deployment_monitoring_job': ('name', ),
        'search_features': ('location', 'query', 'page_size', 'page_token', ),
        'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ),
        'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ),
        'stop_trial': ('name', ),
        'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ),
        'suggest_trials': ('parent', 'suggestion_count', 'client_id', ),
        'undeploy_index': ('index_endpoint', 'deployed_index_id', ),
        'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ),
        'update_artifact': ('artifact', 'update_mask', 'allow_missing', ),
        'update_context': ('context', 'update_mask', 'allow_missing', ),
        'update_dataset': ('dataset', 'update_mask', ),
        'update_endpoint': ('endpoint', 'update_mask', ),
        'update_entity_type': ('entity_type', 'update_mask', ),
        'update_execution': ('execution', 'update_mask', 'allow_missing', ),
        'update_explanation_dataset': ('model', 'examples', ),
        'update_feature': ('feature', 'update_mask', ),
        'update_featurestore': ('featurestore', 'update_mask', ),
        'update_index': ('index', 'update_mask', ),
        'update_index_endpoint': ('index_endpoint', 'update_mask', ),
        'update_model': ('model', 'update_mask', ),
        'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ),
        'update_specialist_pool': ('specialist_pool', 'update_mask', ),
        'update_tensorboard': ('update_mask', 'tensorboard', ),
        'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ),
        'update_tensorboard_run': ('update_mask', 'tensorboard_run', ),
        'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ),
        'upload_model': ('parent', 'model', 'parent_model', 'model_id', ),
        'write_tensorboard_experiment_data': ('tensorboard_experiment', 'write_run_data_requests', ),
        'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a recognized API method call into ``method(request={...}, ...)``."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs,
        )

        # Positional args beyond the flattened parameter list must be control
        # parameters passed positionally; re-attach them by keyword.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(
            cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
            for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)
        )

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value),
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)
            ]),
            keyword=cst.Name("request"),
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs,
        )


def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    # Default object is created once at definition time; safe because the
    # transformer holds no per-file state.
    transformer=aiplatformCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files
        if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        # Python sources default to UTF-8 (PEP 3120); read/write explicitly so
        # the result does not depend on the platform's locale encoding.
        with open(fpath, 'r', encoding='utf-8') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w', encoding='utf-8') as f:
            f.write(updated.code)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the aiplatform client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    fix_files(input_dir, output_dir)
"""Un-flatten definition v1beta1 client method calls.

Walks an input directory of Python sources and copies them to an output
directory, rewriting flattened positional/keyword arguments of known API
methods into a single ``request`` dict argument.  This schema package
exposes no flattened methods, so METHOD_TO_PARAMS is empty and the copy is
effectively a pass-through.
"""
import argparse
import os
import pathlib
import sys
from typing import Any, Callable, Dict, List, Sequence, Tuple

import libcst as cst


def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any],
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns ``(true_list, false_list)``: the elements for which *predicate*
    is truthy, then the rest, each preserving original order.
    """
    results: Tuple[List[Any], List[Any]] = ([], [])

    for item in iterator:
        results[int(predicate(item))].append(item)

    # Returns trueList, falseList
    return results[1], results[0]


class definitionCallTransformer(cst.CSTTransformer):
    # NOTE: generated class name (lowercase prefix) kept for compatibility.

    # RPC control parameters, kept as keyword arguments on the rewritten call.
    # Fixed: ``Tuple[str]`` would mean a 1-tuple; variadic is ``Tuple[str, ...]``.
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    # No flattened methods for this package, so no calls are ever rewritten.
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a recognized API method call into ``method(request={...}, ...)``."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs,
        )

        # Positional args beyond the flattened parameter list must be control
        # parameters passed positionally; re-attach them by keyword.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(
            cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
            for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)
        )

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value),
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)
            ]),
            keyword=cst.Name("request"),
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs,
        )


def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    # Default object is created once at definition time; safe because the
    # transformer holds no per-file state.
    transformer=definitionCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files
        if os.path.splitext(f)[1] == ".py"
    )

    for fpath in pyfile_gen:
        # Python sources default to UTF-8 (PEP 3120); read/write explicitly so
        # the result does not depend on the platform's locale encoding.
        with open(fpath, 'r', encoding='utf-8') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w', encoding='utf-8') as f:
            f.write(updated.code)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the definition client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)

    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)

    fix_files(input_dir, output_dir)
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py new file mode 100644 index 0000000000..b868205c2b --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=paramsCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py new file mode 100644 index 0000000000..69756d1b09 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py new file mode 100644 index 0000000000..6f63394ab6 --- /dev/null +++ b/owl-bot-staging/v1beta1/setup.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1beta1-schema-trainingjob-definition', + author="Google LLC", + author_email="googleapis-packages@google.com", + url="https://github.com/googleapis/python-google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.trainingjob'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.31.0, < 3.0.0dev', + 'libcst >= 0.2.5', + 'googleapis-common-protos >= 1.55.0, <2.0.0dev', + 'proto-plus >= 1.19.7', + ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py new file mode 100644 index 0000000000..3e736ec3df --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -0,0 +1,5583 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceClient +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.services.dataset_service import transports +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from 
google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DatasetServiceClient._get_default_mtls_endpoint(None) is None + assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), +]) +def test_dataset_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DatasetServiceGrpcTransport, "grpc"), + (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_service_account_always_use_jwt(transport_class, transport_name): + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (DatasetServiceClient, "grpc"), + (DatasetServiceAsyncClient, "grpc_asyncio"), +]) +def test_dataset_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_dataset_service_client_get_transport_class(): + transport = DatasetServiceClient.get_transport_class() + available_transports = [ + transports.DatasetServiceGrpcTransport, + ] + assert transport in available_transports + + transport = DatasetServiceClient.get_transport_class("grpc") + assert transport == transports.DatasetServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, 
transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, DatasetServiceAsyncClient +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", grpc_helpers), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_dataset_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = DatasetServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", grpc_helpers), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_dataset_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.CreateDatasetRequest, + dict, +]) +def test_create_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + + +def test_create_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.GetDatasetRequest, + dict, +]) +def test_get_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.UpdateDatasetRequest, + dict, +]) +def test_update_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=name_value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].dataset + mock_val = gca_dataset.Dataset(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListDatasetsRequest, + dict, +]) +def test_list_datasets(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDatasetsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_datasets_async_from_dict(): + await test_list_datasets_async(request_type=dict) + + +def test_list_datasets_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = dataset_service.ListDatasetsResponse() + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_datasets_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_datasets_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = dataset_service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_pager(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_datasets(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in results) +def test_list_datasets_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = list(client.list_datasets(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_datasets_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_datasets(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, dataset.Dataset) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_datasets_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_datasets(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + dataset_service.DeleteDatasetRequest, + dict, +]) +def test_delete_dataset(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = dataset_service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ImportDataRequest, + dict, +]) +def test_import_data(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + +@pytest.mark.asyncio +async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].import_configs + mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] + assert arg == mock_val + + +def test_import_data_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_data( + dataset_service.ImportDataRequest(), + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + +@pytest.mark.asyncio +async def test_import_data_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].import_configs + mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_data_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_data( + dataset_service.ImportDataRequest(), + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ExportDataRequest, + dict, +]) +def test_export_data(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_data(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_data_async_from_dict(): + await test_export_data_async(request_type=dict) + + +def test_export_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_data( + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].export_config + mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + + +def test_export_data_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_data( + dataset_service.ExportDataRequest(), + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + +@pytest.mark.asyncio +async def test_export_data_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_data( + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].export_config + mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_data_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_data( + dataset_service.ExportDataRequest(), + name='name_value', + export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListDataItemsRequest, + dict, +]) +def test_list_data_items(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDataItemsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataItemsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_data_items_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + client.list_data_items() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDataItemsRequest() + +@pytest.mark.asyncio +async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDataItemsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataItemsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_data_items_async_from_dict(): + await test_list_data_items_async(request_type=dict) + + +def test_list_data_items_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = dataset_service.ListDataItemsResponse() + client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_data_items_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) + await client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_data_items_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_data_items( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_data_items_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_data_items( + dataset_service.ListDataItemsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_data_items_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_data_items( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_data_items_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_data_items( + dataset_service.ListDataItemsRequest(), + parent='parent_value', + ) + + +def test_list_data_items_pager(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_data_items(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, data_item.DataItem) + for i in results) +def test_list_data_items_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + pages = list(client.list_data_items(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_data_items_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_data_items(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, data_item.DataItem) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_data_items_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + data_item.DataItem(), + ], + next_page_token='abc', + ), + dataset_service.ListDataItemsResponse( + data_items=[], + next_page_token='def', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + ], + next_page_token='ghi', + ), + dataset_service.ListDataItemsResponse( + data_items=[ + data_item.DataItem(), + data_item.DataItem(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_data_items(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + dataset_service.GetAnnotationSpecRequest, + dict, +]) +def test_get_annotation_spec(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + ) + response = client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + +@pytest.mark.asyncio +async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( + name='name_value', + display_name='display_name_value', + etag='etag_value', + )) + response = await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetAnnotationSpecRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, annotation_spec.AnnotationSpec) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_annotation_spec_async_from_dict(): + await test_get_annotation_spec_async(request_type=dict) + + +def test_get_annotation_spec_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = annotation_spec.AnnotationSpec() + client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_annotation_spec_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetAnnotationSpecRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + await client.get_annotation_spec(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_annotation_spec_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_annotation_spec_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_annotation_spec( + dataset_service.GetAnnotationSpecRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_annotation_spec), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = annotation_spec.AnnotationSpec() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_annotation_spec( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_annotation_spec_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_annotation_spec( + dataset_service.GetAnnotationSpecRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + dataset_service.ListAnnotationsRequest, + dict, +]) +def test_list_annotations(request_type, transport: str = 'grpc'): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = dataset_service.ListAnnotationsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAnnotationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_annotations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + client.list_annotations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + +@pytest.mark.asyncio +async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListAnnotationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAnnotationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_annotations_async_from_dict(): + await test_list_annotations_async(request_type=dict) + + +def test_list_annotations_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = dataset_service.ListAnnotationsResponse() + client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_annotations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) + await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_annotations_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListAnnotationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_annotations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_annotations_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_annotations(
+            dataset_service.ListAnnotationsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        # NOTE(review): removed a dead `call.return_value = ListAnnotationsResponse()`
+        # assignment that was immediately overwritten by the wrapped value below.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_annotations(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_error_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_annotations(
+            dataset_service.ListAnnotationsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_annotations_pager(transport_name: str = "grpc"):
+    # NOTE(review): instantiate the credentials (`AnonymousCredentials()`), as every
+    # other test in this file does, instead of passing the bare class object.
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_annotations(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, annotation.Annotation) + for i in results) +def test_list_annotations_pages(transport_name: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_annotations(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_annotations_async_pager(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_annotations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, annotation.Annotation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_annotations_async_pages(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + annotation.Annotation(), + ], + next_page_token='abc', + ), + dataset_service.ListAnnotationsResponse( + annotations=[], + next_page_token='def', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + ], + next_page_token='ghi', + ), + dataset_service.ListAnnotationsResponse( + annotations=[ + annotation.Annotation(), + annotation.Annotation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_annotations(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DatasetServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DatasetServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = DatasetServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DatasetServiceGrpcTransport, + ) + +def test_dataset_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_dataset_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_dataset', + 'get_dataset', + 'update_dataset', + 'list_datasets', + 'delete_dataset', + 'import_data', + 'export_data', + 'list_data_items', + 'get_annotation_spec', + 'list_annotations', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_dataset_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_dataset_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport() + adc.assert_called_once() + + +def test_dataset_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DatasetServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, + ], +) +def test_dataset_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.DatasetServiceGrpcTransport, grpc_helpers), + (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_dataset_service_host_no_port(transport_name): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_dataset_service_host_with_port(transport_name): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_dataset_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DatasetServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_dataset_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.DatasetServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_dataset_service_grpc_lro_client(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_dataset_service_grpc_lro_async_client(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotation_path(): + project = "squid" + location = "clam" + dataset = "whelk" + data_item = "octopus" + annotation = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) + actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) + assert expected == actual + + +def test_parse_annotation_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + "data_item": "winkle", + "annotation": "nautilus", + } + path = DatasetServiceClient.annotation_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_annotation_path(path) + assert expected == actual + +def test_annotation_spec_path(): + project = "scallop" + location = "abalone" + dataset = "squid" + annotation_spec = "clam" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) + assert expected == actual + + +def test_parse_annotation_spec_path(): + expected = { + "project": "whelk", + "location": "octopus", + "dataset": "oyster", + "annotation_spec": "nudibranch", + } + path = DatasetServiceClient.annotation_spec_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_annotation_spec_path(path) + assert expected == actual + +def test_data_item_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + data_item = "nautilus" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) + actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) + assert expected == actual + + +def test_parse_data_item_path(): + expected = { + "project": "scallop", + "location": "abalone", + "dataset": "squid", + "data_item": "clam", + } + path = DatasetServiceClient.data_item_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_data_item_path(path) + assert expected == actual + +def test_dataset_path(): + project = "whelk" + location = "octopus" + dataset = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = DatasetServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + } + path = DatasetServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = DatasetServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = DatasetServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = DatasetServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = DatasetServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DatasetServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = DatasetServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = DatasetServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = DatasetServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DatasetServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = DatasetServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = DatasetServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    # NOTE(review): renamed from `test_delete_operation`, which collided with the
+    # sync test of the same name above and silently shadowed it so the sync test
+    # was never collected; the async client is also given the asyncio transport,
+    # matching the other sync/async test pairs in this file.
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
def test_cancel_operation(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.CancelOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.cancel_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert response is None


@pytest.mark.asyncio
async def test_cancel_operation_async(transport: str = "grpc"):
    # Renamed (was ``test_cancel_operation``): the duplicate name shadowed the
    # sync test above, so pytest never collected the sync variant.
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.CancelOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert response is None


def test_cancel_operation_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.CancelOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        call.return_value = None

        client.cancel_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


@pytest.mark.asyncio
async def test_cancel_operation_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.CancelOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.cancel_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


def test_cancel_operation_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None

        response = client.cancel_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_cancel_operation_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


def test_wait_operation(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.WaitOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation()
        response = client.wait_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.Operation)
@pytest.mark.asyncio
async def test_wait_operation_async(transport: str = "grpc"):
    # Renamed (was ``test_wait_operation``): the duplicate name shadowed the
    # sync test above, so pytest never collected the sync variant.
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.WaitOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.wait_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.Operation)


def test_wait_operation_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.WaitOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        call.return_value = operations_pb2.Operation()

        client.wait_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


@pytest.mark.asyncio
async def test_wait_operation_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.WaitOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        await client.wait_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


def test_wait_operation_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation()

        response = client.wait_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_wait_operation_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.wait_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


def test_get_operation(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.GetOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation()
        response = client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.Operation)


@pytest.mark.asyncio
async def test_get_operation_async(transport: str = "grpc"):
    # Renamed (was ``test_get_operation``): the duplicate name shadowed the
    # sync test above, so pytest never collected the sync variant.
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.GetOperationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.Operation)
def test_get_operation_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.GetOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        call.return_value = operations_pb2.Operation()

        client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_operation_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.GetOperationRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        await client.get_operation(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


def test_get_operation_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation()

        response = client.get_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_get_operation_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation()
        )
        response = await client.get_operation(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


def test_list_operations(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.ListOperationsRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.ListOperationsResponse()
        response = client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.ListOperationsResponse)


@pytest.mark.asyncio
async def test_list_operations_async(transport: str = "grpc"):
    # Renamed (was ``test_list_operations``): the duplicate name shadowed the
    # sync test above, so pytest never collected the sync variant.
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = operations_pb2.ListOperationsRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        response = await client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, operations_pb2.ListOperationsResponse)


def test_list_operations_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.ListOperationsRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        call.return_value = operations_pb2.ListOperationsResponse()

        client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


@pytest.mark.asyncio
async def test_list_operations_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = operations_pb2.ListOperationsRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        await client.list_operations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


def test_list_operations_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.ListOperationsResponse()

        response = client.list_operations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
@pytest.mark.asyncio
async def test_list_operations_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.ListOperationsResponse()
        )
        response = await client.list_operations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


def test_list_locations(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.ListLocationsRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.ListLocationsResponse()
        response = client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.ListLocationsResponse)


@pytest.mark.asyncio
async def test_list_locations_async(transport: str = "grpc"):
    # Renamed (was ``test_list_locations``): the duplicate name shadowed the
    # sync test above, so pytest never collected the sync variant.
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.ListLocationsRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        response = await client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.ListLocationsResponse)


def test_list_locations_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.ListLocationsRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        call.return_value = locations_pb2.ListLocationsResponse()

        client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


@pytest.mark.asyncio
async def test_list_locations_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.ListLocationsRequest()
    request.name = "locations"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        await client.list_locations(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]


def test_list_locations_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.ListLocationsResponse()

        response = client.list_locations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_list_locations_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.ListLocationsResponse()
        )
        response = await client.list_locations(
            request={
                "name": "locations",
            }
        )
        call.assert_called()
def test_get_location(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.GetLocationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.Location()
        response = client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.Location)


@pytest.mark.asyncio
async def test_get_location_async(transport: str = "grpc_asyncio"):
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = locations_pb2.GetLocationRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        response = await client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, locations_pb2.Location)


def test_get_location_field_headers():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.GetLocationRequest()
    request.name = "locations/abc"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        call.return_value = locations_pb2.Location()

        client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_location_field_headers_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.GetLocationRequest()
    request.name = "locations/abc"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        await client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]


def test_get_location_from_dict():
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    # Fixed: this previously patched ``list_locations``, so ``get_location``
    # went unmocked and ``call.assert_called()`` checked the wrong stub.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.Location()

        response = client.get_location(
            request={
                "name": "locations/abc",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_get_location_from_dict_async():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    # Fixed: this previously patched ``list_locations``, so ``get_location``
    # went unmocked and ``call.assert_called()`` checked the wrong stub.
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        # Use a valid location resource name, matching the sync variant.
        response = await client.get_location(
            request={
                "name": "locations/abc",
            }
        )
        call.assert_called()


def test_set_iam_policy(transport: str = "grpc"):
    client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.SetIamPolicyRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py new file mode 100644 index 0000000000..c21c267378 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -0,0 +1,4536 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import 
operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert EndpointServiceClient._get_default_mtls_endpoint(None) is None + assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), +]) +def 
test_endpoint_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.EndpointServiceGrpcTransport, "grpc"), + (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (EndpointServiceClient, "grpc"), + (EndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_endpoint_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = 
client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_endpoint_service_client_get_transport_class(): + transport = EndpointServiceClient.get_transport_class() + available_transports = [ + transports.EndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = EndpointServiceClient.get_transport_class("grpc") + assert transport == transports.EndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, EndpointServiceAsyncClient +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", grpc_helpers), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(
+ credentials_file="credentials.json"
+ )
+
+ with mock.patch.object(transport_class, '__init__') as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+def test_endpoint_service_client_client_options_from_dict():
+ with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport:
+ grpc_transport.return_value = None
+ client = EndpointServiceClient(
+ client_options={'api_endpoint': 'squid.clam.whelk'}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
+ (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", grpc_helpers),
+ (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
+])
+def test_endpoint_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.CreateEndpointRequest, + dict, +]) +def test_create_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + client.create_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + +@pytest.mark.asyncio +async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_endpoint_async_from_dict(): + await test_create_endpoint_async(request_type=dict) + + +def test_create_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_endpoint( + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].endpoint_id + mock_val = 'endpoint_id_value' + assert arg == mock_val + + +def test_create_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_endpoint( + endpoint_service.CreateEndpointRequest(), + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + +@pytest.mark.asyncio +async def test_create_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_endpoint( + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].endpoint_id + mock_val = 'endpoint_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_endpoint( + endpoint_service.CreateEndpointRequest(), + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + endpoint_id='endpoint_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.GetEndpointRequest, + dict, +]) +def test_get_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + ) + response = client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +def test_get_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + client.get_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + +@pytest.mark.asyncio +async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + )) + response = await client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +@pytest.mark.asyncio +async def test_get_endpoint_async_from_dict(): + await test_get_endpoint_async(request_type=dict) + + +def test_get_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.GetEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + call.return_value = endpoint.Endpoint() + client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.GetEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) + await client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_endpoint( + endpoint_service.GetEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint.Endpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_endpoint( + endpoint_service.GetEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.ListEndpointsRequest, + dict, +]) +def test_list_endpoints(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEndpointsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_endpoints_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + client.list_endpoints() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + +@pytest.mark.asyncio +async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.ListEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_endpoints_async_from_dict(): + await test_list_endpoints_async(request_type=dict) + + +def test_list_endpoints_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = endpoint_service.ListEndpointsResponse() + client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_endpoints_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) + await client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_endpoints_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_endpoints_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_endpoints( + endpoint_service.ListEndpointsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_endpoints_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_endpoints_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_endpoints( + endpoint_service.ListEndpointsRequest(), + parent='parent_value', + ) + + +def test_list_endpoints_pager(transport_name: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + next_page_token='abc', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[], + next_page_token='def', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + ], + next_page_token='ghi', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_endpoints(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, endpoint.Endpoint) + for i in results) +def test_list_endpoints_pages(transport_name: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the 
gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + next_page_token='abc', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[], + next_page_token='def', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + ], + next_page_token='ghi', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + ), + RuntimeError, + ) + pages = list(client.list_endpoints(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_endpoints_async_pager(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + next_page_token='abc', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[], + next_page_token='def', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + ], + next_page_token='ghi', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_endpoints(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, endpoint.Endpoint) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_endpoints_async_pages(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + next_page_token='abc', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[], + next_page_token='def', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + ], + next_page_token='ghi', + ), + endpoint_service.ListEndpointsResponse( + endpoints=[ + endpoint.Endpoint(), + endpoint.Endpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_endpoints(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + endpoint_service.UpdateEndpointRequest, + dict, +]) +def test_update_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + ) + response = client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +def test_update_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + client.update_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + +@pytest.mark.asyncio +async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + )) + response = await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UpdateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' + + +@pytest.mark.asyncio +async def test_update_endpoint_async_from_dict(): + await test_update_endpoint_async(request_type=dict) + + +def test_update_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UpdateEndpointRequest() + + request.endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = gca_endpoint.Endpoint() + client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UpdateEndpointRequest() + + request.endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=name_value', + ) in kw['metadata'] + + +def test_update_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = gca_endpoint.Endpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.DeleteEndpointRequest, + dict, +]) +def test_delete_endpoint(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + client.delete_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + +@pytest.mark.asyncio +async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_endpoint_async_from_dict(): + await test_delete_endpoint_async(request_type=dict) + + +def test_delete_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.DeployModelRequest, + dict, +]) +def test_deploy_model(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_deploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model + mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + + +def test_deploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model + mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + +@pytest.mark.parametrize("request_type", [ + endpoint_service.UndeployModelRequest, + dict, +]) +def test_undeploy_model(request_type, transport: str = 'grpc'): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + arg = args[0].traffic_split + mock_val = {'key_value': 541} + assert arg == mock_val + + +def test_undeploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.undeploy_model(
+            endpoint_service.UndeployModelRequest(),
+            endpoint='endpoint_value',
+            deployed_model_id='deployed_model_id_value',
+            traffic_split={'key_value': 541},
+        )
+
+@pytest.mark.asyncio
+async def test_undeploy_model_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.undeploy_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.  The async
+        # surface awaits the returned call object, so the value must be a
+        # FakeUnaryUnaryCall wrapping the Operation, never a bare Operation.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.undeploy_model(
+            endpoint='endpoint_value',
+            deployed_model_id='deployed_model_id_value',
+            traffic_split={'key_value': 541},
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].endpoint
+        mock_val = 'endpoint_value'
+        assert arg == mock_val
+        arg = args[0].deployed_model_id
+        mock_val = 'deployed_model_id_value'
+        assert arg == mock_val
+        arg = args[0].traffic_split
+        mock_val = {'key_value': 541}
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_undeploy_model_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.undeploy_model( + endpoint_service.UndeployModelRequest(), + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = EndpointServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.EndpointServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = EndpointServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.EndpointServiceGrpcTransport, + ) + +def test_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_endpoint', + 'get_endpoint', + 'list_endpoints', + 'update_endpoint', + 'delete_endpoint', + 'deploy_model', + 'undeploy_model', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with 
mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport() + adc.assert_called_once() + + +def test_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +def test_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.EndpointServiceGrpcTransport, grpc_helpers), + (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_endpoint_service_host_no_port(transport_name): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_endpoint_service_host_with_port(transport_name): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_endpoint_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.EndpointServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_endpoint_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.EndpointServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_service_grpc_lro_client(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = 
client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_service_grpc_lro_async_client(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = EndpointServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = EndpointServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = EndpointServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = EndpointServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_deployment_monitoring_job_path(): + project = "squid" + location = "clam" + model_deployment_monitoring_job = "whelk" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + actual = EndpointServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model_deployment_monitoring_job": "nudibranch", + } + path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = EndpointServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = EndpointServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_network_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = EndpointServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = EndpointServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = EndpointServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = EndpointServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = EndpointServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = EndpointServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = EndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = EndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = EndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = EndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EndpointServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = EndpointServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):  # renamed: was duplicated as test_delete_operation, shadowing the sync test above
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):  # renamed: was duplicated as test_cancel_operation, shadowing the sync test above
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.get_iam_policy), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        )
+
+        response = await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)  # NOTE(review): truthiness only; sync sibling asserts == 1 — confirm intent
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        call.return_value = policy_pb2.Policy()
+
+        client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py new file mode 100644 index 0000000000..515dca5dbf --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -0,0 +1,2953 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+# try/except for Python < 3.8, whose unittest.mock lacks AsyncMock
+try:
+    from unittest import mock
+    from unittest.mock import AsyncMock
+except ImportError:
+    import mock  # third-party backport; NOTE(review): AsyncMock is not rebound on this path
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient
+from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient
+from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import transports
+from google.cloud.aiplatform_v1beta1.types import feature_selector
+from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
+from google.cloud.location import locations_pb2
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import options_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+import google.auth
+
+
+def
client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == 
creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def 
test_featurestore_online_serving_service_client_get_transport_class(): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class() + available_transports = [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) +@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) +def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) +@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreOnlineServingServiceClient, FeaturestoreOnlineServingServiceAsyncClient +]) +@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) +@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) +def test_featurestore_online_serving_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", grpc_helpers), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): 
+ # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_featurestore_online_serving_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreOnlineServingServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", grpc_helpers), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_featurestore_online_serving_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_online_service.ReadFeatureValuesRequest, + dict, +]) +def test_read_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty 
request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse( + ) + response = client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + client.read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse( + )) + response = await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_read_feature_values_async_from_dict(): + await test_read_feature_values_async(request_type=dict) + + +def test_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.ReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.ReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) + await client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_online_service.StreamingReadFeatureValuesRequest, + dict, +]) +def test_streaming_read_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + response = client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_streaming_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + client.streaming_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + response = await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. 
+ message = await response.read() + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async_from_dict(): + await test_streaming_read_feature_values_async(request_type=dict) + + +def test_streaming_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_streaming_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.streaming_read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_streaming_read_feature_values_flattened_error(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.streaming_read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_flattened_error_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.streaming_read_feature_values( + featurestore_online_service.StreamingReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreOnlineServingServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreOnlineServingServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = FeaturestoreOnlineServingServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ) + +def test_featurestore_online_serving_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_featurestore_online_serving_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'read_feature_values', + 'streaming_read_feature_values', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_featurestore_online_serving_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_featurestore_online_serving_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreOnlineServingServiceTransport() + adc.assert_called_once() + + +def test_featurestore_online_serving_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreOnlineServingServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_online_serving_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), + (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_online_serving_service_host_no_port(transport_name): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_online_serving_service_host_with_port(transport_name): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + 
+def test_featurestore_online_serving_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) + + # Check that the path 
construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py new file mode 100644 index 0000000000..eacdf0b869 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -0,0 +1,8174 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types 
import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client):
    """Return a non-localhost default endpoint so mTLS autoswitching is observable."""
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None passes through, *.googleapis.com hosts gain the "mtls." label
    # (idempotently), and non-Google hosts come back untouched.
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class,transport_name", [
    (FeaturestoreServiceClient, "grpc"),
    (FeaturestoreServiceAsyncClient, "grpc_asyncio"),
])
def test_featurestore_service_client_from_service_account_info(client_class, transport_name):
    """from_service_account_info must route through the credentials factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, "from_service_account_info") as factory:
        factory.return_value = creds
        client = client_class.from_service_account_info({"valid": True}, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "aiplatform.googleapis.com:443"


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.FeaturestoreServiceGrpcTransport, "grpc"),
    (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_service_client_service_account_always_use_jwt(transport_class, transport_name):
    # always_use_jwt_access=True must switch the credentials to self-signed JWTs...
    with mock.patch.object(service_account.Credentials, "with_always_use_jwt_access", create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    # ...and always_use_jwt_access=False must leave the credentials alone.
    with mock.patch.object(service_account.Credentials, "with_always_use_jwt_access", create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class,transport_name", [
    (FeaturestoreServiceClient, "grpc"),
    (FeaturestoreServiceAsyncClient, "grpc_asyncio"),
])
def test_featurestore_service_client_from_service_account_file(client_class, transport_name):
    """Both the _file and the _json aliases must use the credentials factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, "from_service_account_file") as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "aiplatform.googleapis.com:443"


def test_featurestore_service_client_get_transport_class():
    """The transport resolver must honor both the default and an explicit name."""
    transport = FeaturestoreServiceClient.get_transport_class()
    available_transports = [
        transports.FeaturestoreServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = FeaturestoreServiceClient.get_transport_class("grpc")
    assert transport == transports.FeaturestoreServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient))
@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient))
def test_featurestore_service_client_client_options(client_class, transport_class, transport_name):
    """Exercise the main client_options / transport paths of the constructor."""
    # A ready-made transport instance must be used as-is: no transport lookup.
    with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # A transport given by name must be resolved through get_transport_class.
    with mock.patch.object(FeaturestoreServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # An explicit api_endpoint wins over every default.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # GOOGLE_API_USE_MTLS_ENDPOINT="never" pins the plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # GOOGLE_API_USE_MTLS_ENDPOINT="always" pins the mTLS endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Any other GOOGLE_API_USE_MTLS_ENDPOINT value must be rejected.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)

    # Likewise for an unsupported GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)

    # quota_project_id in client options must be forwarded to the transport.
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient))
@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """Endpoint autoswitch: with GOOGLE_API_USE_MTLS_ENDPOINT="auto" the client
    moves to the mTLS endpoint iff GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    and a client certificate is available."""

    # Case 1: an explicit client_cert_source is supplied.  Whether it is used
    # depends on GOOGLE_API_USE_CLIENT_CERTIFICATE.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Case 2: no explicit source, but ADC supplies a default client cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # FIX: the generated code read `client.DEFAULT_ENDPOINT` here,
                    # silently relying on `client` leaking out of the Case-1 `with`
                    # block (a NameError if the sections are reordered).  Read the
                    # (identically patched) class attribute instead.
                    if use_client_cert_env == "false":
                        expected_host = client_class.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Case 3: neither an explicit source nor an ADC default cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [
    FeaturestoreServiceClient, FeaturestoreServiceAsyncClient
])
@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient))
@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient))
def test_featurestore_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source must honor both env vars and options."""
    mock_client_cert_source = mock.Mock()

    # GOOGLE_API_USE_CLIENT_CERTIFICATE="true": explicit endpoint + source win.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source

    # "false": the endpoint is kept but the cert source is dropped.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None

    # GOOGLE_API_USE_MTLS_ENDPOINT="never": plain endpoint, no cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None

    # "always": the mTLS endpoint even without a cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None

    # "auto" and no default cert available: plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None

    # "auto" and a default cert available: mTLS endpoint plus that cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
            with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source):
                api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes named in client options must reach the transport untouched."""
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", grpc_helpers),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
    """A credentials file named in client options is forwarded to the transport."""
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )

    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

def test_featurestore_service_client_client_options_from_dict():
    """client_options given as a plain dict must behave like a ClientOptions."""
    with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = FeaturestoreServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
    (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", grpc_helpers),
    (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_featurestore_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
    """Credentials loaded from a file must be the ones used on the channel."""
    # First leg: the file name itself is forwarded to the transport constructor.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )

    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Second leg: the credentials parsed from that file (not ADC) reach
    # grpc_helpers.create_channel.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            scopes=None,
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [
    featurestore_service.CreateFeaturestoreRequest,
    dict,
])
def test_create_featurestore(request_type, transport: str = 'grpc'):
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/spam')
        response = client.create_featurestore(request)

        # Exactly one stub invocation, carrying the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.CreateFeaturestoreRequest()

    # The long-running operation is surfaced as a future.
    assert isinstance(response, future.Future)


def test_create_featurestore_empty_call():
    # Coverage failsafe: a call with neither a request nor flattened
    # fields must still go through.
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        client.create_featurestore()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.CreateFeaturestoreRequest()
@pytest.mark.asyncio
async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest):
    client = FeaturestoreServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked: empty request.
    request = request_type()

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.create_featurestore(request)

        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.CreateFeaturestoreRequest()

    # The long-running operation is surfaced as a future.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_create_featurestore_async_from_dict():
    await test_create_featurestore_async(request_type=dict)


def test_create_featurestore_field_headers():
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any URI-bound field must be mirrored into x-goog-request-params.
    request = featurestore_service.CreateFeaturestoreRequest()
    request.parent = 'parent_value'

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/op')
        client.create_featurestore(request)

        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        _, _, kw = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'parent=parent_value',
        ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_featurestore_field_headers_async():
    client = FeaturestoreServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any URI-bound field must be mirrored into x-goog-request-params.
    request = featurestore_service.CreateFeaturestoreRequest()
    request.parent = 'parent_value'

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.create_featurestore(request)

        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        _, _, kw = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'parent=parent_value',
        ) in kw['metadata']


def test_create_featurestore_flattened():
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/op')
        # Flattened keyword arguments must be folded into the request message.
        client.create_featurestore(
            parent='parent_value',
            featurestore=gca_featurestore.Featurestore(name='name_value'),
            featurestore_id='featurestore_id_value',
        )

        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        arg = args[0].parent
        mock_val = 'parent_value'
        assert arg == mock_val
        arg = args[0].featurestore
        mock_val = gca_featurestore.Featurestore(name='name_value')
        assert arg == mock_val
        arg = args[0].featurestore_id
        mock_val = 'featurestore_id_value'
        assert arg == mock_val
def test_create_featurestore_flattened_error():
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields must raise.
    with pytest.raises(ValueError):
        client.create_featurestore(
            featurestore_service.CreateFeaturestoreRequest(),
            parent='parent_value',
            featurestore=gca_featurestore.Featurestore(name='name_value'),
            featurestore_id='featurestore_id_value',
        )

@pytest.mark.asyncio
async def test_create_featurestore_flattened_async():
    client = FeaturestoreServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(
            type(client.transport.create_featurestore), '__call__') as rpc:
        # FIX: the generator emitted a dead
        # `call.return_value = operations_pb2.Operation(name='operations/op')`
        # immediately overwritten by the FakeUnaryUnaryCall below; the dead
        # assignment has been removed.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Flattened keyword arguments must be folded into the request message.
        response = await client.create_featurestore(
            parent='parent_value',
            featurestore=gca_featurestore.Featurestore(name='name_value'),
            featurestore_id='featurestore_id_value',
        )

        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        arg = args[0].parent
        mock_val = 'parent_value'
        assert arg == mock_val
        arg = args[0].featurestore
        mock_val = gca_featurestore.Featurestore(name='name_value')
        assert arg == mock_val
        arg = args[0].featurestore_id
        mock_val = 'featurestore_id_value'
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_featurestore_flattened_error_async():
    client = FeaturestoreServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying both a request object and flattened fields must raise.
    with pytest.raises(ValueError):
        await client.create_featurestore(
            featurestore_service.CreateFeaturestoreRequest(),
            parent='parent_value',
            featurestore=gca_featurestore.Featurestore(name='name_value'),
            featurestore_id='featurestore_id_value',
        )


@pytest.mark.parametrize("request_type", [
    featurestore_service.GetFeaturestoreRequest,
    dict,
])
def test_get_featurestore(request_type, transport: str = 'grpc'):
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked: empty request.
    request = request_type()

    with mock.patch.object(
            type(client.transport.get_featurestore), '__call__') as rpc:
        rpc.return_value = featurestore.Featurestore(
            name='name_value',
            etag='etag_value',
            state=featurestore.Featurestore.State.STABLE,
        )
        response = client.get_featurestore(request)

        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.GetFeaturestoreRequest()

    # The response is the unmarshalled resource with every faked field intact.
    assert isinstance(response, featurestore.Featurestore)
    assert response.name == 'name_value'
    assert response.etag == 'etag_value'
    assert response.state == featurestore.Featurestore.State.STABLE
def test_get_featurestore_empty_call():
    # Coverage failsafe: a call with neither a request nor flattened
    # fields must still go through.
    client = FeaturestoreServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    with mock.patch.object(
            type(client.transport.get_featurestore), '__call__') as rpc:
        client.get_featurestore()
        rpc.assert_called()
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.GetFeaturestoreRequest()

@pytest.mark.asyncio
async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest):
    client = FeaturestoreServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked: empty request.
    request = request_type()

    with mock.patch.object(
            type(client.transport.get_featurestore), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore(
            name='name_value',
            etag='etag_value',
            state=featurestore.Featurestore.State.STABLE,
        ))
        response = await client.get_featurestore(request)

        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == featurestore_service.GetFeaturestoreRequest()

    # The response is the unmarshalled resource with every faked field intact.
    assert isinstance(response, featurestore.Featurestore)
    assert response.name == 'name_value'
    assert response.etag == 'etag_value'
    assert response.state == featurestore.Featurestore.State.STABLE
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore.Featurestore) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.state == featurestore.Featurestore.State.STABLE + + +@pytest.mark.asyncio +async def test_get_featurestore_async_from_dict(): + await test_get_featurestore_async(request_type=dict) + + +def test_get_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + call.return_value = featurestore.Featurestore() + client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) + await client.get_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_featurestore( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore.Featurestore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_featurestore( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_featurestore( + featurestore_service.GetFeaturestoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListFeaturestoresRequest, + dict, +]) +def test_list_featurestores(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = featurestore_service.ListFeaturestoresResponse( + next_page_token='next_page_token_value', + ) + response = client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_featurestores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + client.list_featurestores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + +@pytest.mark.asyncio +async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturestoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturestoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_featurestores_async_from_dict(): + await test_list_featurestores_async(request_type=dict) + + +def test_list_featurestores_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturestoresResponse() + client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_featurestores_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) + await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_featurestores_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_featurestores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_featurestores_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_featurestores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_featurestores_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_featurestores( + featurestore_service.ListFeaturestoresRequest(), + parent='parent_value', + ) + + +def test_list_featurestores_pager(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_featurestores(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, featurestore.Featurestore) + for i in results) +def test_list_featurestores_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_featurestores(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_featurestores_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_featurestores(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, featurestore.Featurestore) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_featurestores_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_featurestores(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateFeaturestoreRequest, + dict, +]) +def test_update_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_update_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + client.update_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_featurestore_async_from_dict(): + await test_update_featurestore_async(request_type=dict) + + +def test_update_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=name_value', + ) in kw['metadata'] + + +def test_update_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = gca_featurestore.Featurestore(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_featurestore( + featurestore_service.UpdateFeaturestoreRequest(), + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteFeaturestoreRequest, + dict, +]) +def test_delete_featurestore(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + client.delete_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + +@pytest.mark.asyncio +async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_featurestore_async_from_dict(): + await test_delete_featurestore_async(request_type=dict) + + +def test_delete_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_featurestore( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + + +def test_delete_featurestore_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name='name_value', + force=True, + ) + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_featurestore( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_featurestore_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_featurestore( + featurestore_service.DeleteFeaturestoreRequest(), + name='name_value', + force=True, + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.CreateEntityTypeRequest, + dict, +]) +def test_create_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + client.create_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + +@pytest.mark.asyncio +async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_entity_type_async_from_dict(): + await test_create_entity_type_async(request_type=dict) + + +def test_create_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateEntityTypeRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateEntityTypeRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_entity_type( + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].entity_type_id + mock_val = 'entity_type_id_value' + assert arg == mock_val + + +def test_create_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_entity_type( + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].entity_type_id + mock_val = 'entity_type_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + entity_type_id='entity_type_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.GetEntityTypeRequest, + dict, +]) +def test_get_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + client.get_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + +@pytest.mark.asyncio +async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + )) + response = await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_entity_type_async_from_dict(): + await test_get_entity_type_async(request_type=dict) + + +def test_get_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + call.return_value = entity_type.EntityType() + client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) + await client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_entity_type( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_entity_type( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_entity_type( + featurestore_service.GetEntityTypeRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListEntityTypesRequest, + dict, +]) +def test_list_entity_types(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_entity_types_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + client.list_entity_types() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + +@pytest.mark.asyncio +async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListEntityTypesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListEntityTypesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_entity_types_async_from_dict(): + await test_list_entity_types_async(request_type=dict) + + +def test_list_entity_types_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ListEntityTypesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + call.return_value = featurestore_service.ListEntityTypesResponse() + client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_entity_types_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListEntityTypesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) + await client.list_entity_types(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_entity_types_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_entity_types( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_entity_types_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_entity_types( + featurestore_service.ListEntityTypesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_entity_types_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_entity_types), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListEntityTypesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_entity_types( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_entity_types_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_entity_types(
+            featurestore_service.ListEntityTypesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_entity_types_pager(transport_name: str = "grpc"):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_entity_types),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[],
+                next_page_token='def',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_entity_types(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, entity_type.EntityType)
+                   for i in results)
+def test_list_entity_types_pages(transport_name: str = "grpc"):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_entity_types),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[],
+                next_page_token='def',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_entity_types(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pager():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_entity_types),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[],
+                next_page_token='def',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.ListEntityTypesResponse(
+                entity_types=[
+                    entity_type.EntityType(),
+                    entity_type.EntityType(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_entity_types(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, entity_type.EntityType)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pages():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_entity_types),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token='abc', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], + next_page_token='def', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + ], + next_page_token='ghi', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_entity_types(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateEntityTypeRequest, + dict, +]) +def test_update_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_update_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + client.update_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + +@pytest.mark.asyncio +async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + )) + response = await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_entity_type_async_from_dict(): + await test_update_entity_type_async(request_type=dict) + + +def test_update_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + call.return_value = gca_entity_type.EntityType() + client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateEntityTypeRequest() + + request.entity_type.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) + await client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type.name=name_value', + ) in kw['metadata'] + + +def test_update_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_entity_type( + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_entity_type( + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = gca_entity_type.EntityType(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_entity_type( + featurestore_service.UpdateEntityTypeRequest(), + entity_type=gca_entity_type.EntityType(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteEntityTypeRequest, + dict, +]) +def test_delete_entity_type(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + client.delete_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + +@pytest.mark.asyncio +async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_entity_type_async_from_dict(): + await test_delete_entity_type_async(request_type=dict) + + +def test_delete_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_entity_type_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_entity_type( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + + +def test_delete_entity_type_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name='name_value', + force=True, + ) + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_entity_type( + name='name_value', + force=True, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].force + mock_val = True + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name='name_value', + force=True, + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.CreateFeatureRequest, + dict, +]) +def test_create_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + client.create_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + +@pytest.mark.asyncio +async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_feature_async_from_dict(): + await test_create_feature_async(request_type=dict) + + +def test_create_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_feature( + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].feature_id + mock_val = 'feature_id_value' + assert arg == mock_val + + +def test_create_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + +@pytest.mark.asyncio +async def test_create_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_feature( + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].feature_id + mock_val = 'feature_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + feature_id='feature_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.BatchCreateFeaturesRequest, + dict, +]) +def test_batch_create_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_create_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + client.batch_create_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + +@pytest.mark.asyncio +async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_create_features_async_from_dict(): + await test_batch_create_features_async(request_type=dict) + + +def test_batch_create_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_features( + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_features( + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_create_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_features( + featurestore_service.BatchCreateFeaturesRequest(), + parent='parent_value', + requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.GetFeatureRequest, + dict, +]) +def test_get_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature( + name='name_value', + description='description_value', + value_type=feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + ) + response = client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +def test_get_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + client.get_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + +@pytest.mark.asyncio +async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature( + name='name_value', + description='description_value', + value_type=feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + )) + response = await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +@pytest.mark.asyncio +async def test_get_feature_async_from_dict(): + await test_get_feature_async(request_type=dict) + + +def test_get_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + call.return_value = feature.Feature() + client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.GetFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + await client.get_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_feature( + featurestore_service.GetFeatureRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_feature( + featurestore_service.GetFeatureRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ListFeaturesRequest, + dict, +]) +def test_list_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListFeaturesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListFeaturesPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_features_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ client.list_features()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListFeaturesRequest()
+
+@pytest.mark.asyncio
+async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_features(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ListFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListFeaturesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_features_async_from_dict(): + await test_list_features_async(request_type=dict) + + +def test_list_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturesResponse() + client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) + await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_features( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_features( + featurestore_service.ListFeaturesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = featurestore_service.ListFeaturesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_features(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_features_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_features(
+ featurestore_service.ListFeaturesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_features_pager(transport_name: str = "grpc"):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_features(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) + for i in results) +def test_list_features_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = list(client.list_features(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_features(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_features(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + featurestore_service.UpdateFeatureRequest, + dict, +]) +def test_update_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature( + name='name_value', + description='description_value', + value_type=gca_feature.Feature.ValueType.BOOL, + etag='etag_value', + disable_monitoring=True, + ) + response = client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_feature.Feature)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == gca_feature.Feature.ValueType.BOOL
+ assert response.etag == 'etag_value'
+ assert response.disable_monitoring is True
+
+
+def test_update_feature_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_feature),
+ '__call__') as call:
+ client.update_feature()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateFeatureRequest()
+
+@pytest.mark.asyncio
+async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature(
+ name='name_value',
+ description='description_value',
+ value_type=gca_feature.Feature.ValueType.BOOL,
+ etag='etag_value',
+ disable_monitoring=True,
+ ))
+ response = await client.update_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_feature.Feature) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.value_type == gca_feature.Feature.ValueType.BOOL + assert response.etag == 'etag_value' + assert response.disable_monitoring is True + + +@pytest.mark.asyncio +async def test_update_feature_async_from_dict(): + await test_update_feature_async(request_type=dict) + + +def test_update_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = gca_feature.Feature() + client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=name_value', + ) in kw['metadata'] + + +def test_update_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_feature( + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_feature( + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].feature + mock_val = gca_feature.Feature(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.DeleteFeatureRequest, + dict, +]) +def test_delete_feature(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + client.delete_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + +@pytest.mark.asyncio +async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_feature_async_from_dict(): + await test_delete_feature_async(request_type=dict) + + +def test_delete_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_feature( + featurestore_service.DeleteFeatureRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_feature_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_feature( + featurestore_service.DeleteFeatureRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ImportFeatureValuesRequest, + dict, +]) +def test_import_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + client.import_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_feature_values_async_from_dict(): + await test_import_feature_values_async(request_type=dict) + + +def test_import_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_import_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_import_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_feature_values( + featurestore_service.ImportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.BatchReadFeatureValuesRequest, + dict, +]) +def test_batch_read_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + client.batch_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async_from_dict(): + await test_batch_read_feature_values_async(request_type=dict) + + +def test_batch_read_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = 'featurestore_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore=featurestore_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = 'featurestore_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore=featurestore_value', + ) in kw['metadata'] + + +def test_batch_read_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_feature_values( + featurestore='featurestore_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = 'featurestore_value' + assert arg == mock_val + + +def test_batch_read_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore='featurestore_value', + ) + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_feature_values( + featurestore='featurestore_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].featurestore + mock_val = 'featurestore_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore='featurestore_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.ExportFeatureValuesRequest, + dict, +]) +def test_export_feature_values(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + client.export_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + +@pytest.mark.asyncio +async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_feature_values_async_from_dict(): + await test_export_feature_values_async(request_type=dict) + + +def test_export_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type_value', + ) in kw['metadata'] + + +def test_export_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + + +def test_export_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.export_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].entity_type + mock_val = 'entity_type_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_feature_values( + featurestore_service.ExportFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +@pytest.mark.parametrize("request_type", [ + featurestore_service.SearchFeaturesRequest, + dict, +]) +def test_search_features(request_type, transport: str = 'grpc'): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchFeaturesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_features_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + client.search_features() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + +@pytest.mark.asyncio +async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse( + next_page_token='next_page_token_value', + )) + response = await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.SearchFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchFeaturesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_search_features_async_from_dict(): + await test_search_features_async(request_type=dict) + + +def test_search_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = 'location_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + call.return_value = featurestore_service.SearchFeaturesResponse() + client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'location=location_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.SearchFeaturesRequest() + + request.location = 'location_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) + await client.search_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'location=location_value', + ) in kw['metadata'] + + +def test_search_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_features( + location='location_value', + query='query_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].location + mock_val = 'location_value' + assert arg == mock_val + arg = args[0].query + mock_val = 'query_value' + assert arg == mock_val + + +def test_search_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.search_features( + featurestore_service.SearchFeaturesRequest(), + location='location_value', + query='query_value', + ) + +@pytest.mark.asyncio +async def test_search_features_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.SearchFeaturesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_features( + location='location_value', + query='query_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].location + mock_val = 'location_value' + assert arg == mock_val + arg = args[0].query + mock_val = 'query_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_search_features_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.search_features( + featurestore_service.SearchFeaturesRequest(), + location='location_value', + query='query_value', + ) + + +def test_search_features_pager(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('location', ''), + )), + ) + pager = client.search_features(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, feature.Feature) + for i in results) +def test_search_features_pages(transport_name: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = list(client.search_features(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_search_features_async_pager(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_features(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, feature.Feature) + for i in responses) + + +@pytest.mark.asyncio +async def test_search_features_async_pages(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_features), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_features(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = FeaturestoreServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.FeaturestoreServiceGrpcTransport, + ) + +def test_featurestore_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_featurestore_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FeaturestoreServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_featurestore', + 'get_featurestore', + 'list_featurestores', + 'update_featurestore', + 'delete_featurestore', + 'create_entity_type', + 'get_entity_type', + 'list_entity_types', + 'update_entity_type', + 'delete_entity_type', + 'create_feature', + 'batch_create_features', + 'get_feature', + 'list_features', + 'update_feature', + 'delete_feature', + 'import_feature_values', + 'batch_read_feature_values', + 'export_feature_values', + 'search_features', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_featurestore_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + 
quota_project_id="octopus", + ) + + +def test_featurestore_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FeaturestoreServiceTransport() + adc.assert_called_once() + + +def test_featurestore_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FeaturestoreServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, + ], +) +def test_featurestore_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_service_host_no_port(transport_name): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_featurestore_service_host_with_port(transport_name): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_featurestore_service_grpc_transport_channel(): + 
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_featurestore_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + 
grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) +def test_featurestore_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_featurestore_service_grpc_lro_client(): + client = FeaturestoreServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_featurestore_service_grpc_lro_async_client(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + "entity_type": "mussel", + } + path = FeaturestoreServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_entity_type_path(path) + assert expected == actual + +def test_feature_path(): + project = "winkle" + location = "nautilus" + featurestore = "scallop" + entity_type = "abalone" + feature = "squid" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) + actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature) + assert expected == actual + + +def test_parse_feature_path(): + expected = { + "project": "clam", + "location": "whelk", + "featurestore": "octopus", + "entity_type": "oyster", + "feature": "nudibranch", + } + path = FeaturestoreServiceClient.feature_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_feature_path(path) + assert expected == actual + +def test_featurestore_path(): + project = "cuttlefish" + location = "mussel" + featurestore = "winkle" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) + actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore) + assert expected == actual + + +def test_parse_featurestore_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "featurestore": "abalone", + } + path = FeaturestoreServiceClient.featurestore_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_featurestore_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FeaturestoreServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FeaturestoreServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FeaturestoreServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FeaturestoreServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FeaturestoreServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+        assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py new file mode 100644 index 0000000000..f7bae4cdcf --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -0,0 +1,4694 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from 
google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexEndpointServiceClient, 
"grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.IndexEndpointServiceGrpcTransport, "grpc"), + (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexEndpointServiceClient, "grpc"), + (IndexEndpointServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", 
transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_index_endpoint_service_client_get_transport_class(): + transport = IndexEndpointServiceClient.get_transport_class() + available_transports = [ + transports.IndexEndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexEndpointServiceClient.get_transport_class("grpc") + assert transport == transports.IndexEndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint 
autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + IndexEndpointServiceClient, IndexEndpointServiceAsyncClient +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +def test_index_endpoint_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", grpc_helpers), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_index_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = IndexEndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", grpc_helpers), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_endpoint_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.CreateIndexEndpointRequest, + dict, +]) +def test_create_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + client.create_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async_from_dict(): + await test_create_index_endpoint_async(request_type=dict) + + +def test_create_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index_endpoint( + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + + +def test_create_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index_endpoint( + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_index_endpoint( + index_endpoint_service.CreateIndexEndpointRequest(), + parent='parent_value', + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.GetIndexEndpointRequest, + dict, +]) +def test_get_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + ) + response = client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +def test_get_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + client.get_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + )) + response = await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.GetIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +@pytest.mark.asyncio +async def test_get_index_endpoint_async_from_dict(): + await test_get_index_endpoint_async(request_type=dict) + + +def test_get_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + call.return_value = index_endpoint.IndexEndpoint() + client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.GetIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) + await client.get_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_index_endpoint( + index_endpoint_service.GetIndexEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.ListIndexEndpointsRequest, + dict, +]) +def test_list_index_endpoints(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_index_endpoints_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + client.list_index_endpoints() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + +@pytest.mark.asyncio +async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_from_dict(): + await test_list_index_endpoints_async(request_type=dict) + + +def test_list_index_endpoints_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_index_endpoints_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) + await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_index_endpoints_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_index_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_index_endpoints_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_index_endpoints( + index_endpoint_service.ListIndexEndpointsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_index_endpoints_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_index_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_index_endpoints_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_index_endpoints( + index_endpoint_service.ListIndexEndpointsRequest(), + parent='parent_value', + ) + + +def test_list_index_endpoints_pager(transport_name: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_index_endpoints(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) + for i in results) +def 
test_list_index_endpoints_pages(transport_name: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = list(client.list_index_endpoints(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pager(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_index_endpoints(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index_endpoint.IndexEndpoint) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_pages(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_index_endpoints(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.UpdateIndexEndpointRequest, + dict, +]) +def test_update_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + ) + response = client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +def test_update_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + client.update_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + enable_private_service_connect=True, + )) + response = await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + assert response.enable_private_service_connect is True + + +@pytest.mark.asyncio +async def test_update_index_endpoint_async_from_dict(): + await test_update_index_endpoint_async(request_type=dict) + + +def test_update_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + + request.index_endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + call.return_value = gca_index_endpoint.IndexEndpoint() + client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UpdateIndexEndpointRequest() + + request.index_endpoint.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) + await client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint.name=name_value', + ) in kw['metadata'] + + +def test_update_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_index_endpoint( + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.DeleteIndexEndpointRequest, + dict, +]) +def test_delete_index_endpoint(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + client.delete_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async_from_dict(): + await test_delete_index_endpoint_async(request_type=dict) + + +def test_delete_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_index_endpoint_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_index_endpoint( + index_endpoint_service.DeleteIndexEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.DeployIndexRequest, + dict, +]) +def test_deploy_index(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + client.deploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + +@pytest.mark.asyncio +async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_index_async_from_dict(): + await test_deploy_index_async(request_type=dict) + + +def test_deploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +def test_deploy_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + + +def test_deploy_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + +@pytest.mark.asyncio +async def test_deploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_index( + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id='id_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_deploy_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_endpoint_service.UndeployIndexRequest, + dict, +]) +def test_undeploy_index(request_type, transport: str = 'grpc'): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + client.undeploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + +@pytest.mark.asyncio +async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_index_async_from_dict(): + await test_undeploy_index_async(request_type=dict) + + +def test_undeploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint_value', + ) in kw['metadata'] + + +def test_undeploy_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_index( + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = 'index_endpoint_value' + assert arg == mock_val + arg = args[0].deployed_index_id + mock_val = 'deployed_index_id_value' + assert arg == mock_val + + +def test_undeploy_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        response = await client.undeploy_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index_id='deployed_index_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)  # truthy-only: at least one call was recorded
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].index_endpoint
+        mock_val = 'index_endpoint_value'
+        assert arg == mock_val
+        arg = args[0].deployed_index_id
+        mock_val = 'deployed_index_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_undeploy_index_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.undeploy_index(
+            index_endpoint_service.UndeployIndexRequest(),
+            index_endpoint='index_endpoint_value',
+            deployed_index_id='deployed_index_id_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  index_endpoint_service.MutateDeployedIndexRequest,
+  dict,
+])
+def test_mutate_deployed_index(request_type, transport: str = 'grpc'):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.mutate_deployed_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.MutateDeployedIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)  # LRO method wraps the Operation in a future
+
+
+def test_mutate_deployed_index_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        client.mutate_deployed_index()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.MutateDeployedIndexRequest()
+
+@pytest.mark.asyncio
+async def test_mutate_deployed_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.MutateDeployedIndexRequest):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.mutate_deployed_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)  # truthy-only: at least one call was recorded
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.MutateDeployedIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_mutate_deployed_index_async_from_dict():
+    await test_mutate_deployed_index_async(request_type=dict)
+
+
+def test_mutate_deployed_index_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.MutateDeployedIndexRequest()
+
+    request.index_endpoint = 'index_endpoint_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.mutate_deployed_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index_endpoint=index_endpoint_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_mutate_deployed_index_field_headers_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.MutateDeployedIndexRequest()
+
+    request.index_endpoint = 'index_endpoint_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.mutate_deployed_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index_endpoint=index_endpoint_value',
+    ) in kw['metadata']
+
+
+def test_mutate_deployed_index_flattened():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.mutate_deployed_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].index_endpoint
+        mock_val = 'index_endpoint_value'
+        assert arg == mock_val
+        arg = args[0].deployed_index
+        mock_val = gca_index_endpoint.DeployedIndex(id='id_value')
+        assert arg == mock_val
+
+
+def test_mutate_deployed_index_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.mutate_deployed_index(
+            index_endpoint_service.MutateDeployedIndexRequest(),
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_mutate_deployed_index_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_deployed_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        # (dead sync-style assignment removed: the FakeUnaryUnaryCall below is the awaited value)
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.mutate_deployed_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].index_endpoint
+        mock_val = 'index_endpoint_value'
+        assert arg == mock_val
+        arg = args[0].deployed_index
+        mock_val = gca_index_endpoint.DeployedIndex(id='id_value')
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_mutate_deployed_index_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.mutate_deployed_index(
+            index_endpoint_service.MutateDeployedIndexRequest(),
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    options = client_options.ClientOptions()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options=options,
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a credential.
+    options = mock.Mock()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options=options,
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = IndexEndpointServiceClient(transport=transport)
+    assert client.transport is transport
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+@pytest.mark.parametrize("transport_class", [
+    transports.IndexEndpointServiceGrpcTransport,
+    transports.IndexEndpointServiceGrpcAsyncIOTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+])
+def test_transport_kind(transport_name):
+    transport = IndexEndpointServiceClient.get_transport_class(transport_name)(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert transport.kind == transport_name
+
+def test_transport_grpc_default():
+    # A client should use the gRPC transport by default.
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert isinstance(
+        client.transport,
+        transports.IndexEndpointServiceGrpcTransport,
+    )
+
+def test_index_endpoint_service_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.IndexEndpointServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_index_endpoint_service_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport:  # NOTE(review): patch target says aiplatform_v1beta1 — confirm it matches this test module's package
+        Transport.return_value = None
+        transport = transports.IndexEndpointServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'create_index_endpoint',
+        'get_index_endpoint',
+        'list_index_endpoints',
+        'update_index_endpoint',
+        'delete_index_endpoint',
+        'deploy_index',
+        'undeploy_index',
+        'mutate_deployed_index',
+        'set_iam_policy',
+        'get_iam_policy',
+        'test_iam_permissions',
+        'get_location',
+        'list_locations',
+        'get_operation',
+        'wait_operation',
+        'cancel_operation',
+        'delete_operation',
+        'list_operations',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+    # Catch all for all remaining methods and properties
+    remainder = [
+        'kind',
+    ]
+    for r in remainder:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, r)()
+
+
+def test_index_endpoint_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.IndexEndpointServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id="octopus",
+        )
+
+
+def test_index_endpoint_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.IndexEndpointServiceTransport()
+        adc.assert_called_once()
+
+
+def test_index_endpoint_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        IndexEndpointServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.IndexEndpointServiceGrpcTransport,
+        transports.IndexEndpointServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_index_endpoint_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.IndexEndpointServiceGrpcTransport, grpc_helpers),
+        (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_endpoint_service_host_no_port(transport_name):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:443'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_endpoint_service_host_with_port(transport_name):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:8000'
+    )
+
+def test_index_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None  # identity check (PEP 8 E711), was `== None`
+
+
+def test_index_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None  # identity check (PEP 8 E711), was `== None`
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_index_endpoint_service_grpc_lro_client():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have a api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client  # property returns a single shared instance
+
+
+def test_index_endpoint_service_grpc_lro_async_client():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have a api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_path():
+    project = "squid"
+    location = "clam"
+    index = "whelk"
+    expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, )
+    actual = IndexEndpointServiceClient.index_path(project, location, index)
+    assert expected == actual
+
+
+def test_parse_index_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "index": "nudibranch",
+    }
+    path = IndexEndpointServiceClient.index_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_index_path(path)
+    assert expected == actual
+
+def test_index_endpoint_path():
+    project = "cuttlefish"
+    location = "mussel"
+    index_endpoint = "winkle"
+    expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, )
+    actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint)
+    assert expected == actual
+
+
+def test_parse_index_endpoint_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "index_endpoint": "abalone",
+    }
+    path = IndexEndpointServiceClient.index_endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_index_endpoint_path(path)
+    assert expected == actual
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = IndexEndpointServiceClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = IndexEndpointServiceClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = IndexEndpointServiceClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = IndexEndpointServiceClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = IndexEndpointServiceClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = IndexEndpointServiceClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = IndexEndpointServiceClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = IndexEndpointServiceClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = IndexEndpointServiceClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = IndexEndpointServiceClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep:
+        client = IndexEndpointServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep:
+        transport_class = IndexEndpointServiceClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc_asyncio",
+    )
+    with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_delete_operation(transport: str = "grpc"):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):
+    # Renamed from `test_delete_operation`: the original name duplicated the sync
+    # test above, so the module-level redefinition shadowed it and pytest never
+    # collected the sync variant. `_async` suffix matches the file's convention.
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = IndexEndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = IndexEndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # The async transport wraps the response in a FakeUnaryUnaryCall.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        )
+        response = await client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+def test_set_iam_policy_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py new file mode 100644 index 0000000000..cbb1285ab0 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -0,0 +1,3945 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_service import transports +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf 
import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexServiceClient._get_default_mtls_endpoint(None) is None + assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == 
creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.IndexServiceGrpcTransport, "grpc"), + (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (IndexServiceClient, "grpc"), + (IndexServiceAsyncClient, "grpc_asyncio"), +]) +def test_index_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_index_service_client_get_transport_class(): + transport = IndexServiceClient.get_transport_class() + available_transports = [ + 
transports.IndexServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexServiceClient.get_transport_class("grpc") + assert transport == transports.IndexServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +def test_index_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + IndexServiceClient, IndexServiceAsyncClient +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +def test_index_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", grpc_helpers), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+    options = client_options.ClientOptions(
+        credentials_file="credentials.json"
+    )
+
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file="credentials.json",
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+def test_index_service_client_client_options_from_dict():
+    with mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport:
+        grpc_transport.return_value = None
+        client = IndexServiceClient(
+            client_options={'api_endpoint': 'squid.clam.whelk'}
+        )
+        grpc_transport.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
+    (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", grpc_helpers),
+    (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
+])
+def test_index_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
+    # Check the case credentials file is provided.
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.CreateIndexRequest, + dict, +]) +def test_create_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + client.create_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + +@pytest.mark.asyncio +async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_async_from_dict(): + await test_create_index_async(request_type=dict) + + +def test_create_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index( + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + + +def test_create_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_index( + index_service.CreateIndexRequest(), + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_index( + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_index( + index_service.CreateIndexRequest(), + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.GetIndexRequest, + dict, +]) +def test_get_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.GetIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, index.Index) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        client.get_index()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.GetIndexRequest()
+
+@pytest.mark.asyncio
+async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            etag='etag_value',
+        ))
+        response = await client.get_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.GetIndexRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, index.Index) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_index_async_from_dict(): + await test_get_index_async(request_type=dict) + + +def test_get_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + call.return_value = index.Index() + client.get_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.GetIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + await client.get_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_index( + index_service.GetIndexRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index.Index() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_index( + index_service.GetIndexRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.ListIndexesRequest, + dict, +]) +def test_list_indexes(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_indexes_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + client.list_indexes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + +@pytest.mark.asyncio +async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.ListIndexesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListIndexesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_indexes_async_from_dict(): + await test_list_indexes_async(request_type=dict) + + +def test_list_indexes_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.ListIndexesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + call.return_value = index_service.ListIndexesResponse() + client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_indexes_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.ListIndexesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) + await client.list_indexes(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_indexes_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_service.ListIndexesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_indexes( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_indexes_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_indexes( + index_service.ListIndexesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_indexes_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = index_service.ListIndexesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_indexes( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_indexes_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_indexes( + index_service.ListIndexesRequest(), + parent='parent_value', + ) + + +def test_list_indexes_pager(transport_name: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_indexes(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, index.Index) + for i in results) +def test_list_indexes_pages(transport_name: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + pages = list(client.list_indexes(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_indexes_async_pager(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_indexes), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_indexes(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, index.Index) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_indexes_async_pages(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_indexes), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + index.Index(), + ], + next_page_token='abc', + ), + index_service.ListIndexesResponse( + indexes=[], + next_page_token='def', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + ], + next_page_token='ghi', + ), + index_service.ListIndexesResponse( + indexes=[ + index.Index(), + index.Index(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_indexes(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + index_service.UpdateIndexRequest, + dict, +]) +def test_update_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + client.update_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + +@pytest.mark.asyncio +async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_index_async_from_dict(): + await test_update_index_async(request_type=dict) + + +def test_update_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_service.UpdateIndexRequest() + + request.index.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.UpdateIndexRequest() + + request.index.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index.name=name_value', + ) in kw['metadata'] + + +def test_update_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_index( + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_index( + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index + mock_val = gca_index.Index(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_index( + index_service.UpdateIndexRequest(), + index=gca_index.Index(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + index_service.DeleteIndexRequest, + dict, +]) +def test_delete_index(request_type, transport: str = 'grpc'): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + client.delete_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + +@pytest.mark.asyncio +async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_async_from_dict(): + await test_delete_index_async(request_type=dict) + + +def test_delete_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_index_flattened_error(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_index( + index_service.DeleteIndexRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_index_flattened_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_index( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_index_flattened_error_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_index( + index_service.DeleteIndexRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = IndexServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = IndexServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.IndexServiceGrpcTransport, + ) + +def test_index_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_index_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_index', + 'get_index', + 'list_indexes', + 'update_index', + 'delete_index', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_index_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_index_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport() + adc.assert_called_once() + + +def test_index_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, + ], +) +def test_index_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.IndexServiceGrpcTransport, grpc_helpers), + (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_index_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_service_host_no_port(transport_name):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:443'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_index_service_host_with_port(transport_name):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:8000'
+    )
+
+def test_index_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_index_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport])
+def test_index_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport])
+def test_index_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_index_service_grpc_lro_client():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have a api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_service_grpc_lro_async_client():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have a api-core operations client.
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_path(): + project = "squid" + location = "clam" + index = "whelk" + expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + actual = IndexServiceClient.index_path(project, location, index) + assert expected == actual + + +def test_parse_index_path(): + expected = { + "project": "octopus", + "location": "oyster", + "index": "nudibranch", + } + path = IndexServiceClient.index_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_index_path(path) + assert expected == actual + +def test_index_endpoint_path(): + project = "cuttlefish" + location = "mussel" + index_endpoint = "winkle" + expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) + assert expected == actual + + +def test_parse_index_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "index_endpoint": "abalone", + } + path = IndexServiceClient.index_endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_index_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = IndexServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = IndexServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = IndexServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = IndexServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = IndexServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = IndexServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = IndexServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+
+def test_delete_operation_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py new file mode 100644 index 0000000000..a469763685 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -0,0 +1,10674 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceClient +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.services.job_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec 
+from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + 
return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobServiceClient._get_default_mtls_endpoint(None) is None + assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), +]) +def test_job_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobServiceGrpcTransport, 
"grpc"), + (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (JobServiceClient, "grpc"), + (JobServiceAsyncClient, "grpc_asyncio"), +]) +def test_job_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_job_service_client_get_transport_class(): + transport = JobServiceClient.get_transport_class() + available_transports = [ + transports.JobServiceGrpcTransport, + ] + assert transport in available_transports + + transport = JobServiceClient.get_transport_class("grpc") + assert transport == transports.JobServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, JobServiceAsyncClient +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_job_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_job_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateCustomJobRequest, + dict, +]) +def test_create_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + client.create_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + +@pytest.mark.asyncio +async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_custom_job_async_from_dict(): + await test_create_custom_job_async(request_type=dict) + + +def test_create_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = gca_custom_job.CustomJob() + client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].custom_job + mock_val = gca_custom_job.CustomJob(name='name_value') + assert arg == mock_val + + +def test_create_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].custom_job + mock_val = gca_custom_job.CustomJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetCustomJobRequest, + dict, +]) +def test_get_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + client.get_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + +@pytest.mark.asyncio +async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_custom_job_async_from_dict(): + await test_get_custom_job_async(request_type=dict) + + +def test_get_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = custom_job.CustomJob() + client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListCustomJobsRequest, + dict, +]) +def test_list_custom_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCustomJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_custom_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + client.list_custom_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + +@pytest.mark.asyncio +async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListCustomJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_custom_jobs_async_from_dict(): + await test_list_custom_jobs_async(request_type=dict) + + +def test_list_custom_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = job_service.ListCustomJobsResponse() + client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_custom_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) + await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_custom_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_custom_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_custom_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_custom_jobs( + job_service.ListCustomJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_custom_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.ListCustomJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_custom_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_custom_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_custom_jobs( + job_service.ListCustomJobsRequest(), + parent='parent_value', + ) + + +def test_list_custom_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + next_page_token='abc', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[], + next_page_token='def', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + ], + next_page_token='ghi', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_custom_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, custom_job.CustomJob) + for i in results) +def test_list_custom_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + next_page_token='abc', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[], + next_page_token='def', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + ], + next_page_token='ghi', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_custom_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_custom_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + next_page_token='abc', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[], + next_page_token='def', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + ], + next_page_token='ghi', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_custom_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, custom_job.CustomJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_custom_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + next_page_token='abc', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[], + next_page_token='def', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + ], + next_page_token='ghi', + ), + job_service.ListCustomJobsResponse( + custom_jobs=[ + custom_job.CustomJob(), + custom_job.CustomJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_custom_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteCustomJobRequest, + dict, +]) +def test_delete_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + client.delete_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + +@pytest.mark.asyncio +async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_custom_job_async_from_dict(): + await test_delete_custom_job_async(request_type=dict) + + +def test_delete_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.DeleteCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_custom_job( + job_service.DeleteCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_custom_job( + job_service.DeleteCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelCustomJobRequest, + dict, +]) +def test_cancel_custom_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + client.cancel_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + +@pytest.mark.asyncio +async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async_from_dict(): + await test_cancel_custom_job_async(request_type=dict) + + +def test_cancel_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = None + client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.cancel_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_custom_job( + job_service.CancelCustomJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.cancel_custom_job( + job_service.CancelCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateDataLabelingJobRequest, + dict, +]) +def test_create_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + ) + response = client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +def test_create_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + client.create_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + )) + response = await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async_from_dict(): + await test_create_data_labeling_job_async(request_type=dict) + + +def test_create_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = gca_data_labeling_job.DataLabelingJob() + client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].data_labeling_job + mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') + assert arg == mock_val + + +def test_create_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_data_labeling_job( + job_service.CreateDataLabelingJobRequest(), + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].data_labeling_job + mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_data_labeling_job( + job_service.CreateDataLabelingJobRequest(), + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetDataLabelingJobRequest, + dict, +]) +def test_get_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + ) + response = client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +def test_get_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + client.get_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob( + name='name_value', + display_name='display_name_value', + datasets=['datasets_value'], + labeler_count=1375, + instruction_uri='instruction_uri_value', + inputs_schema_uri='inputs_schema_uri_value', + state=job_state.JobState.JOB_STATE_QUEUED, + labeling_progress=1810, + specialist_pools=['specialist_pools_value'], + )) + response = await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async_from_dict(): + await test_get_data_labeling_job_async(request_type=dict) + + +def test_get_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = data_labeling_job.DataLabelingJob() + client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.GetDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_data_labeling_job( + job_service.GetDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_data_labeling_job( + job_service.GetDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListDataLabelingJobsRequest, + dict, +]) +def test_list_data_labeling_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataLabelingJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_data_labeling_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + client.list_data_labeling_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListDataLabelingJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_from_dict(): + await test_list_data_labeling_jobs_async(request_type=dict) + + +def test_list_data_labeling_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListDataLabelingJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + call.return_value = job_service.ListDataLabelingJobsResponse() + client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListDataLabelingJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) + await client.list_data_labeling_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_data_labeling_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_data_labeling_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_data_labeling_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_data_labeling_jobs( + job_service.ListDataLabelingJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListDataLabelingJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_data_labeling_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_data_labeling_jobs( + job_service.ListDataLabelingJobsRequest(), + parent='parent_value', + ) + + +def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_data_labeling_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, data_labeling_job.DataLabelingJob) + for i in results) +def test_list_data_labeling_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_data_labeling_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_data_labeling_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, data_labeling_job.DataLabelingJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_data_labeling_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_labeling_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + next_page_token='abc', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[], + next_page_token='def', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + ], + next_page_token='ghi', + ), + job_service.ListDataLabelingJobsResponse( + data_labeling_jobs=[ + data_labeling_job.DataLabelingJob(), + data_labeling_job.DataLabelingJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_data_labeling_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteDataLabelingJobRequest, + dict, +]) +def test_delete_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + client.delete_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async_from_dict(): + await test_delete_data_labeling_job_async(request_type=dict) + + +def test_delete_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_data_labeling_job( + job_service.DeleteDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_data_labeling_job( + job_service.DeleteDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelDataLabelingJobRequest, + dict, +]) +def test_cancel_data_labeling_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + client.cancel_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async_from_dict(): + await test_cancel_data_labeling_job_async(request_type=dict) + + +def test_cancel_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = None + client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_data_labeling_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.cancel_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateHyperparameterTuningJobRequest, + dict, +]) +def test_create_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + client.create_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async_from_dict(): + await test_create_hyperparameter_tuning_job_async(request_type=dict) + + +def test_create_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateHyperparameterTuningJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateHyperparameterTuningJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_hyperparameter_tuning_job( + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].hyperparameter_tuning_job + mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') + assert arg == mock_val + + +def test_create_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_hyperparameter_tuning_job( + job_service.CreateHyperparameterTuningJobRequest(), + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_hyperparameter_tuning_job( + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].hyperparameter_tuning_job + mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_hyperparameter_tuning_job( + job_service.CreateHyperparameterTuningJobRequest(), + parent='parent_value', + hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetHyperparameterTuningJobRequest, + dict, +]) +def test_get_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + client.get_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async_from_dict(): + await test_get_hyperparameter_tuning_job_async(request_type=dict) + + +def test_get_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_hyperparameter_tuning_job( + job_service.GetHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_hyperparameter_tuning_job( + job_service.GetHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListHyperparameterTuningJobsRequest, + dict, +]) +def test_list_hyperparameter_tuning_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListHyperparameterTuningJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHyperparameterTuningJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_hyperparameter_tuning_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + client.list_hyperparameter_tuning_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_hyperparameter_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListHyperparameterTuningJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_from_dict(): + await test_list_hyperparameter_tuning_jobs_async(request_type=dict) + + +def test_list_hyperparameter_tuning_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
+    # Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+        client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
+        await client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_hyperparameter_tuning_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListHyperparameterTuningJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_hyperparameter_tuning_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_hyperparameter_tuning_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hyperparameter_tuning_jobs( + job_service.ListHyperparameterTuningJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.ListHyperparameterTuningJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_hyperparameter_tuning_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_hyperparameter_tuning_jobs( + job_service.ListHyperparameterTuningJobsRequest(), + parent='parent_value', + ) + + +def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_hyperparameter_tuning_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) + for i in results) +def test_list_hyperparameter_tuning_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_hyperparameter_tuning_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_hyperparameter_tuning_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hyperparameter_tuning_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='abc', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[], + next_page_token='def', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + next_page_token='ghi', + ), + job_service.ListHyperparameterTuningJobsResponse( + hyperparameter_tuning_jobs=[ + hyperparameter_tuning_job.HyperparameterTuningJob(), + hyperparameter_tuning_job.HyperparameterTuningJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteHyperparameterTuningJobRequest, + dict, +]) +def test_delete_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + client.delete_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async_from_dict(): + await test_delete_hyperparameter_tuning_job_async(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelHyperparameterTuningJobRequest, + dict, +]) +def test_cancel_hyperparameter_tuning_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + client.cancel_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async_from_dict(): + await test_cancel_hyperparameter_tuning_job_async(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = None + client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_hyperparameter_tuning_job( + job_service.CancelHyperparameterTuningJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_hyperparameter_tuning_job( + job_service.CancelHyperparameterTuningJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateBatchPredictionJobRequest, + dict, +]) +def test_create_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + model_version_id='model_version_id_value', + service_account='service_account_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.model_version_id == 'model_version_id_value' + assert response.service_account == 'service_account_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + client.create_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + model_version_id='model_version_id_value', + service_account='service_account_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.model_version_id == 'model_version_id_value' + assert response.service_account == 'service_account_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_async_from_dict(): + await test_create_batch_prediction_job_async(request_type=dict) + + +def test_create_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateBatchPredictionJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateBatchPredictionJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) + await client.create_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_batch_prediction_job( + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].batch_prediction_job + mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') + assert arg == mock_val + + +def test_create_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_batch_prediction_job( + job_service.CreateBatchPredictionJobRequest(), + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_batch_prediction_job.BatchPredictionJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_batch_prediction_job( + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].batch_prediction_job + mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_batch_prediction_job( + job_service.CreateBatchPredictionJobRequest(), + parent='parent_value', + batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.GetBatchPredictionJobRequest, + dict, +]) +def test_get_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob( + name='name_value', + display_name='display_name_value', + model='model_value', + model_version_id='model_version_id_value', + service_account='service_account_value', + generate_explanation=True, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.model_version_id == 'model_version_id_value' + assert response.service_account == 'service_account_value' + assert response.generate_explanation is True + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + client.get_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            model_version_id='model_version_id_value',
+            service_account='service_account_value',
+            generate_explanation=True,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.model_version_id == 'model_version_id_value'
+    assert response.service_account == 'service_account_value'
+    assert response.generate_explanation is True
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async_from_dict():
+    await test_get_batch_prediction_job_async(request_type=dict)
+
+
+def test_get_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetBatchPredictionJobRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = batch_prediction_job.BatchPredictionJob()
+        client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) + await client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_batch_prediction_job( + job_service.GetBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_batch_prediction_job( + job_service.GetBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListBatchPredictionJobsRequest, + dict, +]) +def test_list_batch_prediction_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListBatchPredictionJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBatchPredictionJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_batch_prediction_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        client.list_batch_prediction_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_from_dict():
+    await test_list_batch_prediction_jobs_async(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = job_service.ListBatchPredictionJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + call.return_value = job_service.ListBatchPredictionJobsResponse() + client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListBatchPredictionJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) + await client.list_batch_prediction_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_batch_prediction_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_batch_prediction_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_batch_prediction_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_batch_prediction_jobs( + job_service.ListBatchPredictionJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_batch_prediction_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_batch_prediction_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListBatchPredictionJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_batch_prediction_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_batch_prediction_jobs(
+            job_service.ListBatchPredictionJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_batch_prediction_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in results)
+def test_list_batch_prediction_jobs_pages(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_batch_prediction_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_batch_prediction_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='abc', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[], + next_page_token='def', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + ], + next_page_token='ghi', + ), + job_service.ListBatchPredictionJobsResponse( + batch_prediction_jobs=[ + batch_prediction_job.BatchPredictionJob(), + batch_prediction_job.BatchPredictionJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteBatchPredictionJobRequest, + dict, +]) +def test_delete_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + client.delete_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async_from_dict(): + await test_delete_batch_prediction_job_async(request_type=dict) + + +def test_delete_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_batch_prediction_job( + job_service.DeleteBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_batch_prediction_job( + job_service.DeleteBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CancelBatchPredictionJobRequest, + dict, +]) +def test_cancel_batch_prediction_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + client.cancel_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async_from_dict(): + await test_cancel_batch_prediction_job_async(request_type=dict) + + +def test_cancel_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + call.return_value = None + client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelBatchPredictionJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_batch_prediction_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_batch_prediction_job( + job_service.CancelBatchPredictionJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_batch_prediction_job( + job_service.CancelBatchPredictionJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.CreateModelDeploymentMonitoringJobRequest, + dict, +]) +def test_create_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + ) + response = client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +def test_create_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + client.create_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+            name='name_value',
+            display_name='display_name_value',
+            endpoint='endpoint_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+            predict_instance_schema_uri='predict_instance_schema_uri_value',
+            analysis_instance_schema_uri='analysis_instance_schema_uri_value',
+            enable_monitoring_pipeline_logs=True,
+        ))
+        response = await client.create_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_async_from_dict(): + await test_create_model_deployment_monitoring_job_async(request_type=dict) + + +def test_create_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateModelDeploymentMonitoringJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + await client.create_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_model_deployment_monitoring_job( + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + + +def test_create_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_model_deployment_monitoring_job( + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_model_deployment_monitoring_job( + job_service.CreateModelDeploymentMonitoringJobRequest(), + parent='parent_value', + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + dict, +]) +def test_search_model_deployment_monitoring_stats_anomalies(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + client.search_model_deployment_monitoring_stats_anomalies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict():
+    await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict)
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    request.model_deployment_monitoring_job = 'model_deployment_monitoring_job_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+        client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job=model_deployment_monitoring_job_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() + + request.model_deployment_monitoring_job = 'model_deployment_monitoring_job_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) + await client.search_model_deployment_monitoring_stats_anomalies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job=model_deployment_monitoring_job_value', + ) in kw['metadata'] + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_model_deployment_monitoring_stats_anomalies( + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = 'model_deployment_monitoring_job_value' + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + + +def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_model_deployment_monitoring_stats_anomalies( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), + model_deployment_monitoring_job='model_deployment_monitoring_job_value', + deployed_model_id='deployed_model_id_value', + ) + +@pytest.mark.asyncio +async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_deployment_monitoring_stats_anomalies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_model_deployment_monitoring_stats_anomalies(
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].model_deployment_monitoring_job
+        mock_val = 'model_deployment_monitoring_job_value'
+        assert arg == mock_val
+        arg = args[0].deployed_model_id
+        mock_val = 'deployed_model_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_model_deployment_monitoring_stats_anomalies(
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_pager(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('model_deployment_monitoring_job', ''),
+            )),
+        )
+        pager = client.search_model_deployment_monitoring_stats_anomalies(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies)
+                   for i in results)
+def test_search_model_deployment_monitoring_stats_anomalies_pages(transport_name: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='abc', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], + next_page_token='def', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='ghi', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.GetModelDeploymentMonitoringJobRequest, + dict, +]) +def test_get_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + ) + response = client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +def test_get_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + client.get_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + enable_monitoring_pipeline_logs=True, + )) + response = await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + assert response.enable_monitoring_pipeline_logs is True + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_async_from_dict(): + await test_get_model_deployment_monitoring_job_async(request_type=dict) + + +def test_get_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ListModelDeploymentMonitoringJobsRequest, + dict, +]) +def test_list_model_deployment_monitoring_jobs(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_deployment_monitoring_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + client.list_model_deployment_monitoring_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_from_dict(): + await test_list_model_deployment_monitoring_jobs_async(request_type=dict) + + +def test_list_model_deployment_monitoring_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListModelDeploymentMonitoringJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) + await client.list_model_deployment_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_deployment_monitoring_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_deployment_monitoring_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_deployment_monitoring_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_model_deployment_monitoring_jobs( + job_service.ListModelDeploymentMonitoringJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_deployment_monitoring_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_deployment_monitoring_jobs( + job_service.ListModelDeploymentMonitoringJobsRequest(), + parent='parent_value', + ) + + +def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_model_deployment_monitoring_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + for i in results) +def test_list_model_deployment_monitoring_jobs_pages(transport_name: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_pager(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_deployment_monitoring_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_deployment_monitoring_jobs_async_pages(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_deployment_monitoring_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + job_service.UpdateModelDeploymentMonitoringJobRequest, + dict, +]) +def test_update_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + client.update_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async_from_dict(): + await test_update_model_deployment_monitoring_job_async(request_type=dict) + + +def test_update_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=name_value', + ) in kw['metadata'] + + +def test_update_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_model_deployment_monitoring_job( + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model_deployment_monitoring_job + mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.DeleteModelDeploymentMonitoringJobRequest, + dict, +]) +def test_delete_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + client.delete_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async_from_dict(): + await test_delete_model_deployment_monitoring_job_async(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_model_deployment_monitoring_job( + job_service.DeleteModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.PauseModelDeploymentMonitoringJobRequest, + dict, +]) +def test_pause_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_pause_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + client.pause_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async_from_dict(): + await test_pause_model_deployment_monitoring_job_async(request_type=dict) + + +def test_pause_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_pause_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.pause_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_pause_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.pause_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.pause_model_deployment_monitoring_job( + job_service.PauseModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + job_service.ResumeModelDeploymentMonitoringJobRequest, + dict, +]) +def test_resume_model_deployment_monitoring_job(request_type, transport: str = 'grpc'): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_resume_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + client.resume_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async_from_dict(): + await test_resume_model_deployment_monitoring_job_async(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_resume_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_resume_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = JobServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = JobServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = JobServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobServiceGrpcTransport, + ) + +def test_job_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_custom_job', + 'get_custom_job', + 'list_custom_jobs', + 'delete_custom_job', + 'cancel_custom_job', + 'create_data_labeling_job', + 'get_data_labeling_job', + 'list_data_labeling_jobs', + 'delete_data_labeling_job', + 'cancel_data_labeling_job', + 'create_hyperparameter_tuning_job', + 'get_hyperparameter_tuning_job', + 'list_hyperparameter_tuning_jobs', + 'delete_hyperparameter_tuning_job', + 'cancel_hyperparameter_tuning_job', + 'create_batch_prediction_job', + 'get_batch_prediction_job', + 'list_batch_prediction_jobs', + 'delete_batch_prediction_job', + 'cancel_batch_prediction_job', + 'create_model_deployment_monitoring_job', + 'search_model_deployment_monitoring_stats_anomalies', + 'get_model_deployment_monitoring_job', + 'list_model_deployment_monitoring_jobs', + 'update_model_deployment_monitoring_job', + 'delete_model_deployment_monitoring_job', + 'pause_model_deployment_monitoring_job', + 'resume_model_deployment_monitoring_job', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_job_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, 
mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_job_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport() + adc.assert_called_once() + + +def test_job_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, + ], +) +def test_job_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.JobServiceGrpcTransport, grpc_helpers), + (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_job_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_job_service_host_no_port(transport_name): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_job_service_host_with_port(transport_name): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_job_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_job_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key 
bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_job_service_grpc_lro_client(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_service_grpc_lro_async_client(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_batch_prediction_job_path(): + project = "squid" + location = "clam" + batch_prediction_job = "whelk" + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) + assert expected == actual + + +def test_parse_batch_prediction_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "batch_prediction_job": "nudibranch", + } + path = JobServiceClient.batch_prediction_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_batch_prediction_job_path(path) + assert expected == actual + +def test_custom_job_path(): + project = "cuttlefish" + location = "mussel" + custom_job = "winkle" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = JobServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "custom_job": "abalone", + } + path = JobServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_data_labeling_job_path(): + project = "squid" + location = "clam" + data_labeling_job = "whelk" + expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) + assert expected == actual + + +def test_parse_data_labeling_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "data_labeling_job": "nudibranch", + } + path = JobServiceClient.data_labeling_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_data_labeling_job_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = JobServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", + } + path = JobServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = JobServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = JobServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_hyperparameter_tuning_job_path(): + project = "cuttlefish" + location = "mussel" + hyperparameter_tuning_job = "winkle" + expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) + assert expected == actual + + +def test_parse_hyperparameter_tuning_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "hyperparameter_tuning_job": "abalone", + } + path = JobServiceClient.hyperparameter_tuning_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = JobServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = JobServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_deployment_monitoring_job_path(): + project = "cuttlefish" + location = "mussel" + model_deployment_monitoring_job = "winkle" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model_deployment_monitoring_job": "abalone", + } + path = JobServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_network_path(path) + assert expected == actual + +def test_tensorboard_path(): + project = "oyster" + location = "nudibranch" + tensorboard = "cuttlefish" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + actual = JobServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + } + path = JobServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_tensorboard_path(path) + assert expected == actual + +def test_trial_path(): + project = "scallop" + location = "abalone" + study = "squid" + trial = "clam" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = JobServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "whelk", + "location": "octopus", + "study": "oyster", + "trial": "nudibranch", + } + path = JobServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = JobServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = JobServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = JobServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = JobServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = JobServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = JobServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = JobServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty 
request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation_async(transport: str = "grpc"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc"):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (JobServiceClient, transports.JobServiceGrpcTransport), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py new file mode 100644 index 0000000000..ccb56d594e --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -0,0 +1,11200 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceAsyncClient +from 
google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.services.metadata_service import transports +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MetadataServiceClient._get_default_mtls_endpoint(None) is None + assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), +]) +def test_metadata_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MetadataServiceGrpcTransport, "grpc"), + (transports.MetadataServiceGrpcAsyncIOTransport, 
"grpc_asyncio"), +]) +def test_metadata_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MetadataServiceClient, "grpc"), + (MetadataServiceAsyncClient, "grpc_asyncio"), +]) +def test_metadata_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_metadata_service_client_get_transport_class(): + transport = MetadataServiceClient.get_transport_class() + available_transports = [ + transports.MetadataServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MetadataServiceClient.get_transport_class("grpc") + assert transport == transports.MetadataServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + 
(MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +def test_metadata_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + MetadataServiceClient, MetadataServiceAsyncClient +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +def test_metadata_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", grpc_helpers), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_metadata_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MetadataServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", grpc_helpers), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_metadata_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateMetadataStoreRequest, + dict, +]) +def test_create_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + client.create_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_metadata_store_async_from_dict(): + await test_create_metadata_store_async(request_type=dict) + + +def test_create_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_store( + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_store + mock_val = gca_metadata_store.MetadataStore(name='name_value') + assert arg == mock_val + arg = args[0].metadata_store_id + mock_val = 'metadata_store_id_value' + assert arg == mock_val + + +def test_create_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_store( + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_store + mock_val = gca_metadata_store.MetadataStore(name='name_value') + assert arg == mock_val + arg = args[0].metadata_store_id + mock_val = 'metadata_store_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_metadata_store( + metadata_service.CreateMetadataStoreRequest(), + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetMetadataStoreRequest, + dict, +]) +def test_get_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore( + name='name_value', + description='description_value', + ) + response = client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == 'name_value' + assert response.description == 'description_value' + + +def test_get_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + client.get_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore( + name='name_value', + description='description_value', + )) + response = await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_store.MetadataStore) + assert response.name == 'name_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_metadata_store_async_from_dict(): + await test_get_metadata_store_async(request_type=dict) + + +def test_get_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + call.return_value = metadata_store.MetadataStore() + client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) + await client.get_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_store.MetadataStore() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_metadata_store( + metadata_service.GetMetadataStoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListMetadataStoresRequest, + dict, +]) +def test_list_metadata_stores(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = metadata_service.ListMetadataStoresResponse( + next_page_token='next_page_token_value', + ) + response = client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataStoresPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_metadata_stores_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + client.list_metadata_stores() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + +@pytest.mark.asyncio +async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataStoresRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataStoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_from_dict(): + await test_list_metadata_stores_async(request_type=dict) + + +def test_list_metadata_stores_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = metadata_service.ListMetadataStoresResponse() + client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_metadata_stores_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) + await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_metadata_stores_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_stores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_metadata_stores_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_stores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_metadata_stores_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_stores( + metadata_service.ListMetadataStoresRequest(), + parent='parent_value', + ) + + +def test_list_metadata_stores_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_metadata_stores(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) + for i in results) +def test_list_metadata_stores_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = list(client.list_metadata_stores(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_metadata_stores(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, metadata_store.MetadataStore) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_stores(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteMetadataStoreRequest, + dict, +]) +def test_delete_metadata_store(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + client.delete_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + +@pytest.mark.asyncio +async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async_from_dict(): + await test_delete_metadata_store_async(request_type=dict) + + +def test_delete_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_metadata_store_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_metadata_store( + metadata_service.DeleteMetadataStoreRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateArtifactRequest, + dict, +]) +def test_create_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + client.create_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + +@pytest.mark.asyncio +async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_artifact_async_from_dict(): + await test_create_artifact_async(request_type=dict) + + +def test_create_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + call.return_value = gca_artifact.Artifact() + client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateArtifactRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + await client.create_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_artifact( + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].artifact_id + mock_val = 'artifact_id_value' + assert arg == mock_val + + +def test_create_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + +@pytest.mark.asyncio +async def test_create_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_artifact( + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].artifact_id + mock_val = 'artifact_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetArtifactRequest, + dict, +]) +def test_get_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + client.get_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + +@pytest.mark.asyncio +async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_artifact_async_from_dict(): + await test_get_artifact_async(request_type=dict) + + +def test_get_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + call.return_value = artifact.Artifact() + client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + await client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_artifact( + metadata_service.GetArtifactRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_artifact( + metadata_service.GetArtifactRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListArtifactsRequest, + dict, +]) +def test_list_artifacts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + client.list_artifacts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + +@pytest.mark.asyncio +async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListArtifactsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_artifacts_async_from_dict(): + await test_list_artifacts_async(request_type=dict) + + +def test_list_artifacts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + call.return_value = metadata_service.ListArtifactsResponse() + client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) + await client.list_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_artifacts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_artifacts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_artifacts( + metadata_service.ListArtifactsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_artifacts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListArtifactsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_artifacts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_artifacts( + metadata_service.ListArtifactsRequest(), + parent='parent_value', + ) + + +def test_list_artifacts_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_artifacts(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, artifact.Artifact) + for i in results) +def test_list_artifacts_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the 
gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + pages = list(client.list_artifacts(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_artifacts_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_artifacts(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, artifact.Artifact) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_artifacts_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_artifacts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_artifacts(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateArtifactRequest, + dict, +]) +def test_update_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + client.update_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + +@pytest.mark.asyncio +async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_update_artifact_async_from_dict(): + await test_update_artifact_async(request_type=dict) + + +def test_update_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateArtifactRequest() + + request.artifact.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + call.return_value = gca_artifact.Artifact() + client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateArtifactRequest() + + request.artifact.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + await client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'artifact.name=name_value', + ) in kw['metadata'] + + +def test_update_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.update_artifact( + artifact=gca_artifact.Artifact(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_artifact( + metadata_service.UpdateArtifactRequest(), + artifact=gca_artifact.Artifact(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_artifact( + artifact=gca_artifact.Artifact(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].artifact + mock_val = gca_artifact.Artifact(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_artifact( + metadata_service.UpdateArtifactRequest(), + artifact=gca_artifact.Artifact(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteArtifactRequest, + dict, +]) +def test_delete_artifact(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + client.delete_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + +@pytest.mark.asyncio +async def test_delete_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteArtifactRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_artifact_async_from_dict(): + await test_delete_artifact_async(request_type=dict) + + +def test_delete_artifact_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_artifact_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteArtifactRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_artifact(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_artifact_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_artifact_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_artifact( + metadata_service.DeleteArtifactRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_artifact_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_artifact( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_artifact_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_artifact( + metadata_service.DeleteArtifactRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.PurgeArtifactsRequest, + dict, +]) +def test_purge_artifacts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_artifacts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + client.purge_artifacts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + +@pytest.mark.asyncio +async def test_purge_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeArtifactsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeArtifactsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_artifacts_async_from_dict(): + await test_purge_artifacts_async(request_type=dict) + + +def test_purge_artifacts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_artifacts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeArtifactsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_artifacts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_artifacts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_artifacts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_artifacts( + metadata_service.PurgeArtifactsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_artifacts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_artifacts), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_artifacts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_artifacts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_artifacts( + metadata_service.PurgeArtifactsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateContextRequest, + dict, +]) +def test_create_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + client.create_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + +@pytest.mark.asyncio +async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_context_async_from_dict(): + await test_create_context_async(request_type=dict) + + +def test_create_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = gca_context.Context() + client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_context( + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].context_id + mock_val = 'context_id_value' + assert arg == mock_val + + +def test_create_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_context( + metadata_service.CreateContextRequest(), + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + +@pytest.mark.asyncio +async def test_create_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_context( + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].context_id + mock_val = 'context_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_context( + metadata_service.CreateContextRequest(), + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetContextRequest, + dict, +]) +def test_get_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + client.get_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + +@pytest.mark.asyncio +async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_context_async_from_dict(): + await test_get_context_async(request_type=dict) + + +def test_get_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + call.return_value = context.Context() + client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + await client.get_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_context( + metadata_service.GetContextRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_context( + metadata_service.GetContextRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListContextsRequest, + dict, +]) +def test_list_contexts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + client.list_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + +@pytest.mark.asyncio +async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListContextsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_contexts_async_from_dict(): + await test_list_contexts_async(request_type=dict) + + +def test_list_contexts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + call.return_value = metadata_service.ListContextsResponse() + client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) + await client.list_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_contexts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_contexts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_contexts( + metadata_service.ListContextsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListContextsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_contexts( + metadata_service.ListContextsRequest(), + parent='parent_value', + ) + + +def test_list_contexts_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_contexts(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, context.Context) + for i in results) +def test_list_contexts_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + pages = list(client.list_contexts(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_contexts_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_contexts(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, context.Context) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_contexts_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_contexts), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + context.Context(), + ], + next_page_token='abc', + ), + metadata_service.ListContextsResponse( + contexts=[], + next_page_token='def', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + ], + next_page_token='ghi', + ), + metadata_service.ListContextsResponse( + contexts=[ + context.Context(), + context.Context(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_contexts(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateContextRequest, + dict, +]) +def test_update_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context( + name='name_value', + display_name='display_name_value', + etag='etag_value', + parent_contexts=['parent_contexts_value'], + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + client.update_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateContextRequest() + +@pytest.mark.asyncio +async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context(
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=name_value', + ) in kw['metadata'] + + +def test_update_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_context( + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_context( + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = gca_context.Context(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteContextRequest, + dict, +]) +def test_delete_context(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_context_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + client.delete_context() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + +@pytest.mark.asyncio +async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_context_async_from_dict(): + await test_delete_context_async(request_type=dict) + + +def test_delete_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.DeleteContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_context( + metadata_service.DeleteContextRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_context_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_context( + metadata_service.DeleteContextRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.PurgeContextsRequest, + dict, +]) +def test_purge_contexts(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + client.purge_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + +@pytest.mark.asyncio +async def test_purge_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeContextsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeContextsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_contexts_async_from_dict(): + await test_purge_contexts_async(request_type=dict) + + +def test_purge_contexts_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_contexts_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeContextsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_contexts_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_contexts_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_contexts( + metadata_service.PurgeContextsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_contexts), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_contexts( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_contexts_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_contexts( + metadata_service.PurgeContextsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddContextArtifactsAndExecutionsRequest, + dict, +]) +def test_add_context_artifacts_and_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse( + ) + response = client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) + + +def test_add_context_artifacts_and_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + client.add_context_artifacts_and_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse( + )) + response = await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_async_from_dict(): + await test_add_context_artifacts_and_executions_async(request_type=dict) + + +def test_add_context_artifacts_and_executions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextArtifactsAndExecutionsRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) + await client.add_context_artifacts_and_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_add_context_artifacts_and_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_artifacts_and_executions( + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].artifacts + mock_val = ['artifacts_value'] + assert arg == mock_val + arg = args[0].executions + mock_val = ['executions_value'] + assert arg == mock_val + + +def test_add_context_artifacts_and_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_artifacts_and_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_artifacts_and_executions( + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].artifacts + mock_val = ['artifacts_value'] + assert arg == mock_val + arg = args[0].executions + mock_val = ['executions_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_context_artifacts_and_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_artifacts_and_executions( + metadata_service.AddContextArtifactsAndExecutionsRequest(), + context='context_value', + artifacts=['artifacts_value'], + executions=['executions_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddContextChildrenRequest, + dict, +]) +def test_add_context_children(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse( + ) + response = client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +def test_add_context_children_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + client.add_context_children() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + +@pytest.mark.asyncio +async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse( + )) + response = await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +@pytest.mark.asyncio +async def test_add_context_children_async_from_dict(): + await test_add_context_children_async(request_type=dict) + + +def test_add_context_children_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = metadata_service.AddContextChildrenResponse() + client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_context_children_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) + await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_add_context_children_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_children( + context='context_value', + child_contexts=['child_contexts_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].child_contexts + mock_val = ['child_contexts_value'] + assert arg == mock_val + + +def test_add_context_children_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context='context_value', + child_contexts=['child_contexts_value'], + ) + +@pytest.mark.asyncio +async def test_add_context_children_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_context_children( + context='context_value', + child_contexts=['child_contexts_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + arg = args[0].child_contexts + mock_val = ['child_contexts_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_context_children_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_context_children( + metadata_service.AddContextChildrenRequest(), + context='context_value', + child_contexts=['child_contexts_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.QueryContextLineageSubgraphRequest, + dict, +]) +def test_query_context_lineage_subgraph(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph( + ) + response = client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_context_lineage_subgraph_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + client.query_context_lineage_subgraph() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( + )) + response = await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_async_from_dict(): + await test_query_context_lineage_subgraph_async(request_type=dict) + + +def test_query_context_lineage_subgraph_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryContextLineageSubgraphRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + call.return_value = lineage_subgraph.LineageSubgraph() + client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.QueryContextLineageSubgraphRequest() + + request.context = 'context_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + await client.query_context_lineage_subgraph(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context_value', + ) in kw['metadata'] + + +def test_query_context_lineage_subgraph_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_context_lineage_subgraph( + context='context_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + + +def test_query_context_lineage_subgraph_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context='context_value', + ) + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_context_lineage_subgraph), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_context_lineage_subgraph( + context='context_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].context + mock_val = 'context_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_query_context_lineage_subgraph_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context='context_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateExecutionRequest, + dict, +]) +def test_create_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + client.create_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + +@pytest.mark.asyncio +async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_execution_async_from_dict(): + await test_create_execution_async(request_type=dict) + + +def test_create_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + call.return_value = gca_execution.Execution() + client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateExecutionRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + await client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_execution( + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].execution_id + mock_val = 'execution_id_value' + assert arg == mock_val + + +def test_create_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_execution( + metadata_service.CreateExecutionRequest(), + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + +@pytest.mark.asyncio +async def test_create_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_execution( + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].execution_id + mock_val = 'execution_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_execution( + metadata_service.CreateExecutionRequest(), + parent='parent_value', + execution=gca_execution.Execution(name='name_value'), + execution_id='execution_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetExecutionRequest, + dict, +]) +def test_get_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution( + name='name_value', + display_name='display_name_value', + state=execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + client.get_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + +@pytest.mark.asyncio +async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution( + name='name_value', + display_name='display_name_value', + state=execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_execution_async_from_dict(): + await test_get_execution_async(request_type=dict) + + +def test_get_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + call.return_value = execution.Execution() + client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_execution( + metadata_service.GetExecutionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_execution( + metadata_service.GetExecutionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListExecutionsRequest, + dict, +]) +def test_list_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + client.list_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + +@pytest.mark.asyncio +async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExecutionsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_executions_async_from_dict(): + await test_list_executions_async(request_type=dict) + + +def test_list_executions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = metadata_service.ListExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + call.return_value = metadata_service.ListExecutionsResponse() + client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) + await client.list_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_executions( + metadata_service.ListExecutionsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListExecutionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_executions( + metadata_service.ListExecutionsRequest(), + parent='parent_value', + ) + + +def test_list_executions_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_executions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, execution.Execution) + for i in results) +def test_list_executions_pages(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # 
Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = list(client.list_executions(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_executions_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_executions(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, execution.Execution) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_executions_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_executions(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + metadata_service.UpdateExecutionRequest, + dict, +]) +def test_update_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + client.update_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + +@pytest.mark.asyncio +async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_update_execution_async_from_dict(): + await test_update_execution_async(request_type=dict) + + +def test_update_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + call.return_value = gca_execution.Execution() + client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution.name=name_value', + ) in kw['metadata'] + + +def test_update_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_execution( + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_execution( + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = gca_execution.Execution(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_execution( + metadata_service.UpdateExecutionRequest(), + execution=gca_execution.Execution(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.DeleteExecutionRequest, + dict, +]) +def test_delete_execution(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + client.delete_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + +@pytest.mark.asyncio +async def test_delete_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteExecutionRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_execution_async_from_dict(): + await test_delete_execution_async(request_type=dict) + + +def test_delete_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteExecutionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_execution(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_execution( + metadata_service.DeleteExecutionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_execution_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_execution_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_execution( + metadata_service.DeleteExecutionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.PurgeExecutionsRequest, + dict, +]) +def test_purge_executions(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_purge_executions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + client.purge_executions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + +@pytest.mark.asyncio +async def test_purge_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeExecutionsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.PurgeExecutionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_purge_executions_async_from_dict(): + await test_purge_executions_async(request_type=dict) + + +def test_purge_executions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_purge_executions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.PurgeExecutionsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.purge_executions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_purge_executions_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.purge_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_purge_executions_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.purge_executions( + metadata_service.PurgeExecutionsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_purge_executions_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.purge_executions), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.purge_executions( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_purge_executions_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.purge_executions( + metadata_service.PurgeExecutionsRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.AddExecutionEventsRequest, + dict, +]) +def test_add_execution_events(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse( + ) + response = client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +def test_add_execution_events_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + client.add_execution_events() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + +@pytest.mark.asyncio +async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse( + )) + response = await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddExecutionEventsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +@pytest.mark.asyncio +async def test_add_execution_events_async_from_dict(): + await test_add_execution_events_async(request_type=dict) + + +def test_add_execution_events_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = metadata_service.AddExecutionEventsResponse() + client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_execution_events_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) + await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +def test_add_execution_events_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_execution_events( + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + arg = args[0].events + mock_val = [event.Event(artifact='artifact_value')] + assert arg == mock_val + + +def test_add_execution_events_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.add_execution_events( + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + arg = args[0].events + mock_val = [event.Event(artifact='artifact_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_add_execution_events_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.add_execution_events( + metadata_service.AddExecutionEventsRequest(), + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.QueryExecutionInputsAndOutputsRequest, + dict, +]) +def test_query_execution_inputs_and_outputs(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph( + ) + response = client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, lineage_subgraph.LineageSubgraph) + + +def test_query_execution_inputs_and_outputs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + client.query_execution_inputs_and_outputs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.query_execution_inputs_and_outputs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph(
+ ))
+ response = await client.query_execution_inputs_and_outputs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_async_from_dict():
+ await test_query_execution_inputs_and_outputs_async(request_type=dict)
+
+
+def test_query_execution_inputs_and_outputs_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+ request.execution = 'execution_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.query_execution_inputs_and_outputs),
+ '__call__') as call:
+ call.return_value = lineage_subgraph.LineageSubgraph()
+ client.query_execution_inputs_and_outputs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.QueryExecutionInputsAndOutputsRequest() + + request.execution = 'execution_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + await client.query_execution_inputs_and_outputs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution_value', + ) in kw['metadata'] + + +def test_query_execution_inputs_and_outputs_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_execution_inputs_and_outputs( + execution='execution_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + + +def test_query_execution_inputs_and_outputs_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution='execution_value', + ) + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_execution_inputs_and_outputs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = lineage_subgraph.LineageSubgraph() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_execution_inputs_and_outputs( + execution='execution_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].execution + mock_val = 'execution_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_query_execution_inputs_and_outputs_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_execution_inputs_and_outputs( + metadata_service.QueryExecutionInputsAndOutputsRequest(), + execution='execution_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.CreateMetadataSchemaRequest, + dict, +]) +def test_create_metadata_schema(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + ) + response = client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataSchemaRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +def test_create_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_metadata_schema),
+ '__call__') as call:
+ client.create_metadata_schema()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.CreateMetadataSchemaRequest()
+
+@pytest.mark.asyncio
+async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_metadata_schema),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema(
+ name='name_value',
+ schema_version='schema_version_value',
+ schema='schema_value',
+ schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+ description='description_value',
+ ))
+ response = await client.create_metadata_schema(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.CreateMetadataSchemaRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async_from_dict(): + await test_create_metadata_schema_async(request_type=dict) + + +def test_create_metadata_schema_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = gca_metadata_schema.MetadataSchema() + client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) + await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_schema( + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_schema + mock_val = gca_metadata_schema.MetadataSchema(name='name_value') + assert arg == mock_val + arg = args[0].metadata_schema_id + mock_val = 'metadata_schema_id_value' + assert arg == mock_val + + +def test_create_metadata_schema_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_metadata_schema( + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].metadata_schema + mock_val = gca_metadata_schema.MetadataSchema(name='name_value') + assert arg == mock_val + arg = args[0].metadata_schema_id + mock_val = 'metadata_schema_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_metadata_schema( + metadata_service.CreateMetadataSchemaRequest(), + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.GetMetadataSchemaRequest, + dict, +]) +def test_get_metadata_schema(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema( + name='name_value', + schema_version='schema_version_value', + schema='schema_value', + schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, + description='description_value', + ) + response = client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +def test_get_metadata_schema_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + client.get_metadata_schema() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetMetadataSchemaRequest() + +@pytest.mark.asyncio +async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema(
+ name='name_value',
+ schema_version='schema_version_value',
+ schema='schema_value',
+ schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+ description='description_value',
+ ))
+ response = await client.get_metadata_schema(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.GetMetadataSchemaRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, metadata_schema.MetadataSchema)
+ assert response.name == 'name_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.schema == 'schema_value'
+ assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_schema_async_from_dict():
+ await test_get_metadata_schema_async(request_type=dict)
+
+
+def test_get_metadata_schema_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.GetMetadataSchemaRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_metadata_schema),
+ '__call__') as call:
+ call.return_value = metadata_schema.MetadataSchema()
+ client.get_metadata_schema(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) + await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_schema( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_metadata_schema_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_metadata_schema( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_metadata_schema_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_metadata_schema( + metadata_service.GetMetadataSchemaRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + metadata_service.ListMetadataSchemasRequest, + dict, +]) +def test_list_metadata_schemas(request_type, transport: str = 'grpc'): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse( + next_page_token='next_page_token_value', + ) + response = client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.ListMetadataSchemasRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMetadataSchemasPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_metadata_schemas_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.list_metadata_schemas),
+ '__call__') as call:
+ client.list_metadata_schemas()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListMetadataSchemasRequest()
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_metadata_schemas),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_metadata_schemas(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListMetadataSchemasRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListMetadataSchemasAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async_from_dict():
+ await test_list_metadata_schemas_async(request_type=dict)
+
+
+def test_list_metadata_schemas_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.ListMetadataSchemasRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + call.return_value = metadata_service.ListMetadataSchemasResponse() + client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_metadata_schemas_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataSchemasRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) + await client.list_metadata_schemas(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_metadata_schemas_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_schemas( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_metadata_schemas_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataSchemasResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_metadata_schemas( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_metadata_schemas_flattened_error_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_metadata_schemas( + metadata_service.ListMetadataSchemasRequest(), + parent='parent_value', + ) + + +def test_list_metadata_schemas_pager(transport_name: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_schemas), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[], + next_page_token='def', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataSchemasResponse( + metadata_schemas=[ + metadata_schema.MetadataSchema(), + metadata_schema.MetadataSchema(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_metadata_schemas(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, metadata_schema.MetadataSchema) + for i in results) +def 
def test_list_metadata_schemas_pages(transport_name: str = "grpc"):
    """Each page's raw_page must expose the matching next_page_token."""
    client = MetadataServiceClient(
        # FIX: instantiate the credentials; the generated code passed the
        # AnonymousCredentials class object itself.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_metadata_schemas),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='abc',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token='def',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='ghi',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_metadata_schemas(request={}).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token

@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pager():
    """Async pager must be iterable with `async for` across all pages."""
    client = MetadataServiceAsyncClient(
        # FIX: instantiate the credentials; the generated code passed the
        # AnonymousCredentials class object itself.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_metadata_schemas),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='abc',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token='def',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='ghi',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_metadata_schemas(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager: # pragma: no branch
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, metadata_schema.MetadataSchema)
                   for i in responses)


@pytest.mark.asyncio
async def test_list_metadata_schemas_async_pages():
    """Async `.pages` iteration must yield pages with matching tokens."""
    client = MetadataServiceAsyncClient(
        # FIX: instantiate the credentials; the generated code passed the
        # AnonymousCredentials class object itself.
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_metadata_schemas),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='abc',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[],
                next_page_token='def',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                ],
                next_page_token='ghi',
            ),
            metadata_service.ListMetadataSchemasResponse(
                metadata_schemas=[
                    metadata_schema.MetadataSchema(),
                    metadata_schema.MetadataSchema(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_metadata_schemas(request={})).pages: # pragma: no branch
            pages.append(page_)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [
  metadata_service.QueryArtifactLineageSubgraphRequest,
  dict,
])
def test_query_artifact_lineage_subgraph(request_type, transport: str = 'grpc'):
    """Round-trip a (possibly dict-typed) request through the mocked stub."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph(
        )
        response = client.query_artifact_lineage_subgraph(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)


def test_query_artifact_lineage_subgraph_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        client.query_artifact_lineage_subgraph()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()

@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest):
    """Async variant of the basic round-trip test."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph(
        ))
        response = await client.query_artifact_lineage_subgraph(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, lineage_subgraph.LineageSubgraph)


@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_async_from_dict():
    await test_query_artifact_lineage_subgraph_async(request_type=dict)


def test_query_artifact_lineage_subgraph_field_headers():
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryArtifactLineageSubgraphRequest()

    request.artifact = 'artifact_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        call.return_value = lineage_subgraph.LineageSubgraph()
        client.query_artifact_lineage_subgraph(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'artifact=artifact_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_field_headers_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = metadata_service.QueryArtifactLineageSubgraphRequest()

    request.artifact = 'artifact_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
        await client.query_artifact_lineage_subgraph(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'artifact=artifact_value',
    ) in kw['metadata']


def test_query_artifact_lineage_subgraph_flattened():
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = lineage_subgraph.LineageSubgraph()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.query_artifact_lineage_subgraph(
            artifact='artifact_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].artifact
        mock_val = 'artifact_value'
        assert arg == mock_val


def test_query_artifact_lineage_subgraph_flattened_error():
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.query_artifact_lineage_subgraph(
            metadata_service.QueryArtifactLineageSubgraphRequest(),
            artifact='artifact_value',
        )

@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.query_artifact_lineage_subgraph),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # FIX: the generated code assigned a bare response first and then
        # immediately overwrote it with the awaitable wrapper; the dead first
        # assignment is dropped.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.query_artifact_lineage_subgraph(
            artifact='artifact_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].artifact
        mock_val = 'artifact_value'
        assert arg == mock_val

@pytest.mark.asyncio
async def test_query_artifact_lineage_subgraph_flattened_error_async():
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.query_artifact_lineage_subgraph(
            metadata_service.QueryArtifactLineageSubgraphRequest(),
            artifact='artifact_value',
        )
def test_credentials_transport_error():
    """Mutually exclusive client constructor arguments must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide an api_key and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options=options,
            transport=transport,
        )

    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options=options,
            credentials=ga_credentials.AnonymousCredentials()
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MetadataServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )


def test_transport_instance():
    """A client accepts a pre-built transport and exposes it unchanged."""
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = MetadataServiceClient(transport=transport)
    assert client.transport is transport

def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    transport = transports.MetadataServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

    transport = transports.MetadataServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

@pytest.mark.parametrize("transport_class", [
    transports.MetadataServiceGrpcTransport,
    transports.MetadataServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()

@pytest.mark.parametrize("transport_name", [
    "grpc",
])
def test_transport_kind(transport_name):
    """`transport.kind` mirrors the transport name used to build it."""
    transport = MetadataServiceClient.get_transport_class(transport_name)(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert transport.kind == transport_name

def test_transport_grpc_default():
    # A client should use the gRPC transport by default.
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.MetadataServiceGrpcTransport,
    )

def test_metadata_service_base_transport_error():
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.MetadataServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )


def test_metadata_service_base_transport():
    """Every RPC stub on the abstract base transport raises NotImplementedError."""
    # NOTE(review): the patch target below says `aiplatform_v1beta1` while the
    # commit subject mentions v1 — confirm this test file belongs to v1beta1.
    with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.MetadataServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'create_metadata_store',
        'get_metadata_store',
        'list_metadata_stores',
        'delete_metadata_store',
        'create_artifact',
        'get_artifact',
        'list_artifacts',
        'update_artifact',
        'delete_artifact',
        'purge_artifacts',
        'create_context',
        'get_context',
        'list_contexts',
        'update_context',
        'delete_context',
        'purge_contexts',
        'add_context_artifacts_and_executions',
        'add_context_children',
        'query_context_lineage_subgraph',
        'create_execution',
        'get_execution',
        'list_executions',
        'update_execution',
        'delete_execution',
        'purge_executions',
        'add_execution_events',
        'query_execution_inputs_and_outputs',
        'create_metadata_schema',
        'get_metadata_schema',
        'list_metadata_schemas',
        'query_artifact_lineage_subgraph',
        'set_iam_policy',
        'get_iam_policy',
        'test_iam_permissions',
        'get_location',
        'list_locations',
        'get_operation',
        'wait_operation',
        'cancel_operation',
        'delete_operation',
        'list_operations',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client

    # Catch all for all remaining methods and properties
    remainder = [
        'kind',
    ]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)()


def test_metadata_service_base_transport_with_credentials_file():
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MetadataServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            quota_project_id="octopus",
        )


def test_metadata_service_base_transport_with_adc():
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MetadataServiceTransport()
        adc.assert_called_once()


def test_metadata_service_auth_adc():
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MetadataServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            quota_project_id=None,
        )


@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MetadataServiceGrpcTransport,
        transports.MetadataServiceGrpcAsyncIOTransport,
    ],
)
def test_metadata_service_transport_auth_adc(transport_class):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
            quota_project_id="octopus",
        )


@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.MetadataServiceGrpcTransport, grpc_helpers),
        (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
def test_metadata_service_transport_create_channel(transport_class, grpc_helpers):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )

        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
def test_metadata_service_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """ssl_channel_credentials wins; otherwise client_cert_source_for_mtls is used."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )


@pytest.mark.parametrize("transport_name", [
    "grpc",
    "grpc_asyncio",
])
def test_metadata_service_host_no_port(transport_name):
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
        transport=transport_name,
    )
    assert client.transport._host == (
        'aiplatform.googleapis.com:443'
    )

@pytest.mark.parametrize("transport_name", [
    "grpc",
    "grpc_asyncio",
])
def test_metadata_service_host_with_port(transport_name):
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
        transport=transport_name,
    )
    assert client.transport._host == (
        'aiplatform.googleapis.com:8000'
    )

def test_metadata_service_grpc_transport_channel():
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.MetadataServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # FIX: compare to the None singleton with `is`, not `==` (PEP 8 / E711).
    assert transport._ssl_channel_credentials is None


def test_metadata_service_grpc_asyncio_transport_channel():
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.MetadataServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # FIX: compare to the None singleton with `is`, not `==` (PEP 8 / E711).
    assert transport._ssl_channel_credentials is None


# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
def test_metadata_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred

            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel

            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred


# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
def test_metadata_service_transport_channel_mtls_with_adc(
    transport_class
):
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_metadata_service_grpc_lro_client():
    """The sync transport lazily builds and caches one OperationsClient."""
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsClient,
    )

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client


def test_metadata_service_grpc_lro_async_client():
    """The async transport lazily builds and caches one OperationsAsyncClient."""
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc_asyncio',
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsAsyncClient,
    )

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client


def test_artifact_path():
    # Build a path from parts and compare against the expected template.
    project = "squid"
    location = "clam"
    metadata_store = "whelk"
    artifact = "octopus"
    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
    actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact)
    assert expected == actual


def test_parse_artifact_path():
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "metadata_store": "cuttlefish",
        "artifact": "mussel",
    }
    path = MetadataServiceClient.artifact_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_artifact_path(path)
    assert expected == actual

def test_context_path():
    # Build a path from parts and compare against the expected template.
    project = "winkle"
    location = "nautilus"
    metadata_store = "scallop"
    context = "abalone"
    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
    actual = MetadataServiceClient.context_path(project, location, metadata_store, context)
    assert expected == actual


def test_parse_context_path():
    expected = {
        "project": "squid",
        "location": "clam",
        "metadata_store": "whelk",
        "context": "octopus",
    }
    path = MetadataServiceClient.context_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_context_path(path)
    assert expected == actual

def test_execution_path():
    # Build a path from parts and compare against the expected template.
    project = "oyster"
    location = "nudibranch"
    metadata_store = "cuttlefish"
    execution = "mussel"
    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, )
    actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution)
    assert expected == actual


def test_parse_execution_path():
    expected = {
        "project": "winkle",
        "location": "nautilus",
        "metadata_store": "scallop",
        "execution": "abalone",
    }
    path = MetadataServiceClient.execution_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_execution_path(path)
    assert expected == actual

def test_metadata_schema_path():
    # Build a path from parts and compare against the expected template.
    project = "squid"
    location = "clam"
    metadata_store = "whelk"
    metadata_schema = "octopus"
    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, )
    actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema)
    assert expected == actual


def test_parse_metadata_schema_path():
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "metadata_store": "cuttlefish",
        "metadata_schema": "mussel",
    }
    path = MetadataServiceClient.metadata_schema_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_metadata_schema_path(path)
    assert expected == actual

def test_metadata_store_path():
    # Build a path from parts and compare against the expected template.
    project = "winkle"
    location = "nautilus"
    metadata_store = "scallop"
    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, )
    actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store)
    assert expected == actual


def test_parse_metadata_store_path():
    expected = {
        "project": "abalone",
        "location": "squid",
        "metadata_store": "clam",
    }
    path = MetadataServiceClient.metadata_store_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_metadata_store_path(path)
    assert expected == actual

def test_common_billing_account_path():
    # Build a path from parts and compare against the expected template.
    billing_account = "whelk"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    actual = MetadataServiceClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    expected = {
        "billing_account": "octopus",
    }
    path = MetadataServiceClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_common_billing_account_path(path)
    assert expected == actual

def test_common_folder_path():
    # Build a path from parts and compare against the expected template.
    folder = "oyster"
    expected = "folders/{folder}".format(folder=folder, )
    actual = MetadataServiceClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    expected = {
        "folder": "nudibranch",
    }
    path = MetadataServiceClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = MetadataServiceClient.parse_common_folder_path(path)
    assert expected == actual

def test_common_organization_path():
    # Build a path from parts and compare against the expected template.
    organization = "cuttlefish"
    expected = "organizations/{organization}".format(organization=organization, )
    actual = MetadataServiceClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    expected = {
        "organization": "mussel",
    }
    path = MetadataServiceClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
+ actual = MetadataServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format(project=project, ) + actual = MetadataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = MetadataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MetadataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = MetadataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MetadataServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)

    assert response.version == 774

    assert response.etag == b"etag_blob"


@pytest.mark.asyncio
async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
    client = MetadataServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.SetIamPolicyRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)

    assert response.version == 774

    assert response.etag == b"etag_blob"


def test_set_iam_policy_field_headers():
    client = MetadataServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py new file mode 100644 index 0000000000..cbe65d72ce --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -0,0 +1,3321 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceClient +from google.cloud.aiplatform_v1beta1.services.migration_service import pagers +from google.cloud.aiplatform_v1beta1.services.migration_service import transports +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MigrationServiceClient._get_default_mtls_endpoint(None) is None + assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), +]) +def test_migration_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MigrationServiceGrpcTransport, "grpc"), + (transports.MigrationServiceGrpcAsyncIOTransport, 
"grpc_asyncio"), +]) +def test_migration_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (MigrationServiceClient, "grpc"), + (MigrationServiceAsyncClient, "grpc_asyncio"), +]) +def test_migration_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_migration_service_client_get_transport_class(): + transport = MigrationServiceClient.get_transport_class() + available_transports = [ + transports.MigrationServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MigrationServiceClient.get_transport_class("grpc") + assert transport == transports.MigrationServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +def test_migration_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, MigrationServiceAsyncClient +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +def test_migration_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", grpc_helpers), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_migration_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MigrationServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", grpc_helpers), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_migration_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + migration_service.SearchMigratableResourcesRequest, + dict, +]) +def test_search_migratable_resources(request_type, transport: str = 'grpc'): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_migratable_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + client.search_migratable_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + +@pytest.mark.asyncio +async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + )) + response = await client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_from_dict(): + await test_search_migratable_resources_async(request_type=dict) + + +def test_search_migratable_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.SearchMigratableResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + call.return_value = migration_service.SearchMigratableResourcesResponse() + client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_search_migratable_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.SearchMigratableResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) + await client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_search_migratable_resources_flattened(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_migratable_resources( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_search_migratable_resources_flattened_error(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_migratable_resources( + migration_service.SearchMigratableResourcesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_search_migratable_resources_flattened_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_migratable_resources( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_search_migratable_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.search_migratable_resources( + migration_service.SearchMigratableResourcesRequest(), + parent='parent_value', + ) + + +def test_search_migratable_resources_pager(transport_name: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.search_migratable_resources(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, migratable_resource.MigratableResource) + for i in results) +def test_search_migratable_resources_pages(transport_name: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + pages = list(client.search_migratable_resources(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_pager(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_migratable_resources(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, migratable_resource.MigratableResource) + for i in responses) + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async_pages(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + next_page_token='abc', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[], + next_page_token='def', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + ], + next_page_token='ghi', + ), + migration_service.SearchMigratableResourcesResponse( + migratable_resources=[ + migratable_resource.MigratableResource(), + migratable_resource.MigratableResource(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_migratable_resources(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + migration_service.BatchMigrateResourcesRequest, + dict, +]) +def test_batch_migrate_resources(request_type, transport: str = 'grpc'): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_migrate_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + client.batch_migrate_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async_from_dict(): + await test_batch_migrate_resources_async(request_type=dict) + + +def test_batch_migrate_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_migrate_resources_flattened(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].migrate_resource_requests + mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + assert arg == mock_val + + +def test_batch_migrate_resources_flattened_error(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].migrate_resource_requests + mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MigrationServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MigrationServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = MigrationServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.MigrationServiceGrpcTransport, + ) + +def test_migration_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_migration_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'search_migratable_resources', + 'batch_migrate_resources', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_migration_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def 
test_migration_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport() + adc.assert_called_once() + + +def test_migration_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MigrationServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, + ], +) +def test_migration_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.MigrationServiceGrpcTransport, grpc_helpers), + (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_migration_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_migration_service_host_no_port(transport_name): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_migration_service_host_with_port(transport_name): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_migration_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MigrationServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_migration_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MigrationServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_migration_service_grpc_lro_client(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport 
= client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_migration_service_grpc_lro_async_client(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotated_dataset_path(): + project = "squid" + dataset = "clam" + annotated_dataset = "whelk" + expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) + actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) + assert expected == actual + + +def test_parse_annotated_dataset_path(): + expected = { + "project": "octopus", + "dataset": "oyster", + "annotated_dataset": "nudibranch", + } + path = MigrationServiceClient.annotated_dataset_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_annotated_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "winkle", + "dataset": "nautilus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "clam", + "location": "whelk", + "dataset": "octopus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "mussel", + "location": "winkle", + "dataset": "nautilus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_model_path(): + project = "scallop" + location = "abalone" + model = "squid" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_path(): + project = "oyster" + location = "nudibranch" + model = "cuttlefish" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "mussel", + "location": "winkle", + "model": "nautilus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_version_path(): + project = "scallop" + model = "abalone" + version = "squid" + expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) + actual = MigrationServiceClient.version_path(project, model, version) + assert expected == actual + + +def test_parse_version_path(): + expected = { + "project": "clam", + "model": "whelk", + "version": "octopus", + } + path = MigrationServiceClient.version_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_version_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MigrationServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = MigrationServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = MigrationServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = MigrationServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MigrationServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = MigrationServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = MigrationServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = MigrationServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MigrationServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = MigrationServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MigrationServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.delete_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.cancel_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ call.return_value = operations_pb2.Operation()
+
+ client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.get_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.GetOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.ListOperationsResponse()
+ response = client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.ListOperationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.ListOperationsResponse()
+ )
+ response = await client.list_operations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest()
+ request.name = "locations"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ call.return_value = locations_pb2.ListLocationsResponse()
+
+ client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ )
+
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],)
+ )
+
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+ assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = MigrationServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ request.resource = "resource/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse()
+ )
+
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict():
+ client = MigrationServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py new file mode 100644 index 0000000000..3eee8e785b --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -0,0 +1,7099 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceClient +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.services.model_service import transports +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation as 
gca_model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), +]) +def test_model_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ModelServiceGrpcTransport, "grpc"), + (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 
'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ModelServiceClient, "grpc"), + (ModelServiceAsyncClient, "grpc_asyncio"), +]) +def test_model_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + ] + assert transport in available_transports + + transport = ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ModelServiceClient, 
"DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, ModelServiceAsyncClient +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", grpc_helpers), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_model_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", grpc_helpers), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_model_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.UploadModelRequest, + dict, +]) +def test_upload_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upload_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + client.upload_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + +@pytest.mark.asyncio +async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upload_model_async_from_dict(): + await test_upload_model_async(request_type=dict) + + +def test_upload_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_upload_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_upload_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + + +def test_upload_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_upload_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_upload_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelRequest, + dict, +]) +def test_get_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + +@pytest.mark.asyncio +async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelsRequest, + dict, +]) +def test_list_models(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + +@pytest.mark.asyncio +async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_models_async_from_dict(): + await test_list_models_async(request_type=dict) + + +def test_list_models_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = model_service.ListModelsResponse() + client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_models_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) + await client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_models_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_models_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_models( + model_service.ListModelsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_models_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_models( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_models_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_models( + model_service.ListModelsRequest(), + parent='parent_value', + ) + + +def test_list_models_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_models_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_models(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_models_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_models(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_models_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_models), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_models(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelVersionsRequest, + dict, +]) +def test_list_model_versions(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelVersionsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_versions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + client.list_model_versions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + +@pytest.mark.asyncio +async def test_list_model_versions_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelVersionsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelVersionsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelVersionsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_versions_async_from_dict(): + await test_list_model_versions_async(request_type=dict) + + +def test_list_model_versions_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + call.return_value = model_service.ListModelVersionsResponse() + client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_versions_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelVersionsRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse()) + await client.list_model_versions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_list_model_versions_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelVersionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_versions( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_list_model_versions_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_versions( + model_service.ListModelVersionsRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_service.ListModelVersionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelVersionsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_versions( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_model_versions_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_versions( + model_service.ListModelVersionsRequest(), + name='name_value', + ) + + +def test_list_model_versions_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('name', ''), + )), + ) + pager = client.list_model_versions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model.Model) + for i in results) +def test_list_model_versions_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_versions(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_versions_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_versions(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model.Model) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_versions_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_versions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelVersionsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelVersionsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_versions(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.UpdateModelRequest, + dict, +]) +def test_update_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty 
calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + client.update_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + +@pytest.mark.asyncio +async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_model_async_from_dict(): + await test_update_model_async(request_type=dict) + + +def test_update_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = gca_model.Model() + client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=name_value', + ) in kw['metadata'] + + +def test_update_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = gca_model.Model(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.UpdateExplanationDatasetRequest, + dict, +]) +def test_update_explanation_dataset(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_explanation_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateExplanationDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_update_explanation_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + client.update_explanation_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateExplanationDatasetRequest() + +@pytest.mark.asyncio +async def test_update_explanation_dataset_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateExplanationDatasetRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_explanation_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateExplanationDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_explanation_dataset_async_from_dict(): + await test_update_explanation_dataset_async(request_type=dict) + + +def test_update_explanation_dataset_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateExplanationDatasetRequest() + + request.model = 'model_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_explanation_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model=model_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_explanation_dataset_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateExplanationDatasetRequest() + + request.model = 'model_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_explanation_dataset(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model=model_value', + ) in kw['metadata'] + + +def test_update_explanation_dataset_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_explanation_dataset( + model='model_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = 'model_value' + assert arg == mock_val + + +def test_update_explanation_dataset_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_explanation_dataset( + model_service.UpdateExplanationDatasetRequest(), + model='model_value', + ) + +@pytest.mark.asyncio +async def test_update_explanation_dataset_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_explanation_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_explanation_dataset( + model='model_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = 'model_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_explanation_dataset_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_explanation_dataset( + model_service.UpdateExplanationDatasetRequest(), + model='model_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.DeleteModelRequest, + dict, +]) +def test_delete_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + model_service.DeleteModelRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model( + model_service.DeleteModelRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.DeleteModelVersionRequest, + dict, +]) +def test_delete_model_version(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_version_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + client.delete_model_version() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + +@pytest.mark.asyncio +async def test_delete_model_version_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelVersionRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelVersionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_version_async_from_dict(): + await test_delete_model_version_async(request_type=dict) + + +def test_delete_model_version_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.DeleteModelVersionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_version_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelVersionRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model_version(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_model_version_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_version( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_model_version_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_model_version_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_version), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_version( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_model_version_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_model_version( + model_service.DeleteModelVersionRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.MergeVersionAliasesRequest, + dict, +]) +def test_merge_version_aliases(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_merge_version_aliases_empty_call(): + # This test is a coverage failsafe to make sure that 
totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + client.merge_version_aliases() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + +@pytest.mark.asyncio +async def test_merge_version_aliases_async(transport: str = 'grpc_asyncio', request_type=model_service.MergeVersionAliasesRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( + name='name_value', + version_id='version_id_value', + version_aliases=['version_aliases_value'], + display_name='display_name_value', + description='description_value', + version_description='version_description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + )) + response = await client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.MergeVersionAliasesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.version_id == 'version_id_value' + assert response.version_aliases == ['version_aliases_value'] + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.version_description == 'version_description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_merge_version_aliases_async_from_dict(): + await test_merge_version_aliases_async(request_type=dict) + + +def test_merge_version_aliases_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.MergeVersionAliasesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + call.return_value = model.Model() + client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_merge_version_aliases_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.MergeVersionAliasesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.merge_version_aliases(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_merge_version_aliases_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.merge_version_aliases( + name='name_value', + version_aliases=['version_aliases_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].version_aliases + mock_val = ['version_aliases_value'] + assert arg == mock_val + + +def test_merge_version_aliases_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), + name='name_value', + version_aliases=['version_aliases_value'], + ) + +@pytest.mark.asyncio +async def test_merge_version_aliases_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.merge_version_aliases), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.merge_version_aliases( + name='name_value', + version_aliases=['version_aliases_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].version_aliases + mock_val = ['version_aliases_value'] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_merge_version_aliases_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.merge_version_aliases( + model_service.MergeVersionAliasesRequest(), + name='name_value', + version_aliases=['version_aliases_value'], + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ExportModelRequest, + dict, +]) +def test_export_model(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + +@pytest.mark.asyncio +async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_model_async_from_dict(): + await test_export_model_async(request_type=dict) + + +def test_export_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ExportModelRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_export_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_model( + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') + assert arg == mock_val + + +def test_export_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.export_model( + model_service.ExportModelRequest(), + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + +@pytest.mark.asyncio +async def test_export_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.export_model( + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].output_config + mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_export_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.export_model( + model_service.ExportModelRequest(), + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ImportModelEvaluationRequest, + dict, +]) +def test_import_model_evaluation(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + ) + response = client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + + +def test_import_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + client.import_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_import_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.ImportModelEvaluationRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + )) + response = await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async_from_dict(): + await test_import_model_evaluation_async(request_type=dict) + + +def test_import_model_evaluation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + call.return_value = gca_model_evaluation.ModelEvaluation() + client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation()) + await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_import_model_evaluation_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.import_model_evaluation( + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name='name_value') + assert arg == mock_val + + +def test_import_model_evaluation_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_evaluation.ModelEvaluation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_model_evaluation( + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent='parent_value', + model_evaluation=gca_model_evaluation.ModelEvaluation(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelEvaluationRequest, + dict, +]) +def test_get_model_evaluation(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + ) + response = client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + + +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( + name='name_value', + display_name='display_name_value', + metrics_schema_uri='metrics_schema_uri_value', + slice_dimensions=['slice_dimensions_value'], + )) + response = await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation.ModelEvaluation) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + assert response.slice_dimensions == ['slice_dimensions_value'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_async_from_dict(): + await test_get_model_evaluation_async(request_type=dict) + + +def test_get_model_evaluation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = model_evaluation.ModelEvaluation() + client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_service.GetModelEvaluationRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + await client.get_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_model_evaluation( + model_service.GetModelEvaluationRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_evaluation( + model_service.GetModelEvaluationRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelEvaluationsRequest, + dict, +]) +def test_list_model_evaluations(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_from_dict(): + await test_list_model_evaluations_async(request_type=dict) + + +def test_list_model_evaluations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = model_service.ListModelEvaluationsResponse() + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_evaluations_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_evaluations_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluations( + model_service.ListModelEvaluationsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_evaluations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_model_evaluations_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_model_evaluations( + model_service.ListModelEvaluationsRequest(), + parent='parent_value', + ) + + +def test_list_model_evaluations_pager(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_model_evaluations(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in results) +def test_list_model_evaluations_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluations(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluations(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + model_service.GetModelEvaluationSliceRequest, + dict, +]) +def test_get_model_evaluation_slice(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + ) + response = client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +def test_get_model_evaluation_slice_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + client.get_model_evaluation_slice() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + )) + response = await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async_from_dict(): + await test_get_model_evaluation_slice_async(request_type=dict) + + +def test_get_model_evaluation_slice_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_model_evaluation_slice_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_model_evaluation_slice_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + model_service.ListModelEvaluationSlicesRequest, + dict, +]) +def test_list_model_evaluation_slices(request_type, transport: str = 'grpc'): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationSlicesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluation_slices_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + client.list_model_evaluation_slices() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_from_dict(): + await test_list_model_evaluation_slices_async(request_type=dict) + + +def test_list_model_evaluation_slices_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = model_service.ListModelEvaluationSlicesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + call.return_value = model_service.ListModelEvaluationSlicesResponse() + client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationSlicesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) + await client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_model_evaluation_slices_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluation_slices( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_model_evaluation_slices_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluation_slices( + model_service.ListModelEvaluationSlicesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        # (the async client awaits the stub call, so the response must be
+        # wrapped in an awaitable fake; a bare Response here would be a dead store)
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluation_slices(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluation_slices(
+            model_service.ListModelEvaluationSlicesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_evaluation_slices_pager(transport_name: str = "grpc"):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials,
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_model_evaluation_slices),
+        '__call__') as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_model_evaluation_slices(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) + for i in results) +def test_list_model_evaluation_slices_pages(transport_name: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluation_slices(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluation_slices(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluation_slices(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = ModelServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'upload_model', + 'get_model', + 'list_models', + 'list_model_versions', + 'update_model', + 'update_explanation_dataset', + 'delete_model', + 'delete_model_version', + 'merge_version_aliases', + 'export_model', + 'import_model_evaluation', + 'get_model_evaluation', + 'list_model_evaluations', + 'get_model_evaluation_slice', + 'list_model_evaluation_slices', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and 
credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_model_service_host_no_port(transport_name):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:443'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+])
+def test_model_service_host_with_port(transport_name):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'aiplatform.googleapis.com:8000'
+    )
+
+def test_model_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.ModelServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    # PEP 8: compare to the None singleton with `is`, not `==`
+    assert transport._ssl_channel_credentials is None
+
+
+def test_model_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.ModelServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    # PEP 8: compare to the None singleton with `is`, not `==`
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport])
+def test_model_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_service_grpc_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_service_grpc_lro_async_client(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = ModelServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = ModelServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = ModelServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "squid" + location = "clam" + model = "whelk" + evaluation = "octopus" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "model": "cuttlefish", + "evaluation": "mussel", + } + path = ModelServiceClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_model_evaluation_slice_path(): + project = "winkle" + location = "nautilus" + model = "scallop" + evaluation = "abalone" + slice = "squid" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) + assert expected == actual + + +def test_parse_model_evaluation_slice_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + "evaluation": "oyster", + "slice": "nudibranch", + } + path = ModelServiceClient.model_evaluation_slice_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_evaluation_slice_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "cuttlefish" + location = "mussel" + training_pipeline = "winkle" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "training_pipeline": "abalone", + } + path = ModelServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = ModelServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert response is None +@pytest.mark.asyncio +async def test_delete_operation(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_wait_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) +@pytest.mark.asyncio +async def test_list_operations(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) +@pytest.mark.asyncio +async def test_list_locations(transport: str = "grpc"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_list_locations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] + +def test_get_location_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py new file mode 100644 index 0000000000..431c7ccee7 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -0,0 +1,5529 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model +from 
google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.cloud.aiplatform_v1beta1.types import value +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PipelineServiceClient._get_default_mtls_endpoint(None) is None + assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), +]) +def test_pipeline_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PipelineServiceGrpcTransport, "grpc"), + (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_service_account_always_use_jwt(transport_class, transport_name): + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PipelineServiceClient, "grpc"), + (PipelineServiceAsyncClient, "grpc_asyncio"), +]) +def test_pipeline_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_pipeline_service_client_get_transport_class(): + transport = PipelineServiceClient.get_transport_class() + available_transports = [ + transports.PipelineServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PipelineServiceClient.get_transport_class("grpc") + assert transport == transports.PipelineServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, 
transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, PipelineServiceAsyncClient +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", grpc_helpers), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_pipeline_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PipelineServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", grpc_helpers), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_pipeline_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CreateTrainingPipelineRequest, + dict, +]) +def test_create_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_create_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + client.create_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + )) + response = await client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_training_pipeline_async_from_dict(): + await test_create_training_pipeline_async(request_type=dict) + + +def test_create_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = gca_training_pipeline.TrainingPipeline() + client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) + await client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_training_pipeline( + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].training_pipeline + mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') + assert arg == mock_val + + +def test_create_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_training_pipeline( + pipeline_service.CreateTrainingPipelineRequest(), + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_training_pipeline( + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].training_pipeline + mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_training_pipeline( + pipeline_service.CreateTrainingPipelineRequest(), + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.GetTrainingPipelineRequest, + dict, +]) +def test_get_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_get_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + client.get_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + model_id='model_id_value', + parent_model='parent_model_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + )) + response = await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.model_id == 'model_id_value' + assert response.parent_model == 'parent_model_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_training_pipeline_async_from_dict(): + await test_get_training_pipeline_async(request_type=dict) + + +def test_get_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = training_pipeline.TrainingPipeline() + client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.ListTrainingPipelinesRequest, + dict, +]) +def test_list_training_pipelines(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrainingPipelinesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_training_pipelines_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + client.list_training_pipelines() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + +@pytest.mark.asyncio +async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListTrainingPipelinesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_from_dict(): + await test_list_training_pipelines_async(request_type=dict) + + +def test_list_training_pipelines_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = pipeline_service.ListTrainingPipelinesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_training_pipelines_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListTrainingPipelinesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) + await client.list_training_pipelines(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_training_pipelines_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_training_pipelines( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_training_pipelines_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_training_pipelines( + pipeline_service.ListTrainingPipelinesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_training_pipelines_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListTrainingPipelinesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_training_pipelines( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_training_pipelines_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_training_pipelines( + pipeline_service.ListTrainingPipelinesRequest(), + parent='parent_value', + ) + + +def test_list_training_pipelines_pager(transport_name: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_training_pipelines(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, 
training_pipeline.TrainingPipeline) + for i in results) +def test_list_training_pipelines_pages(transport_name: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + pages = list(client.list_training_pipelines(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_pager(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_training_pipelines(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, training_pipeline.TrainingPipeline) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_training_pipelines_async_pages(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_training_pipelines), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_training_pipelines(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + pipeline_service.DeleteTrainingPipelineRequest, + dict, +]) +def test_delete_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + client.delete_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async_from_dict(): + await test_delete_training_pipeline_async(request_type=dict) + + +def test_delete_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_training_pipeline( + pipeline_service.DeleteTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_training_pipeline( + pipeline_service.DeleteTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CancelTrainingPipelineRequest, + dict, +]) +def test_cancel_training_pipeline(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + client.cancel_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async_from_dict(): + await test_cancel_training_pipeline_async(request_type=dict) + + +def test_cancel_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = None + client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_training_pipeline( + pipeline_service.CancelTrainingPipelineRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_training_pipeline( + pipeline_service.CancelTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CreatePipelineJobRequest, + dict, +]) +def test_create_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + ) + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + +@pytest.mark.asyncio +async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + )) + response = await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = gca_pipeline_job.PipelineJob() + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].pipeline_job + mock_val = gca_pipeline_job.PipelineJob(name='name_value') + assert arg == mock_val + arg = args[0].pipeline_job_id + mock_val = 'pipeline_job_id_value' + assert arg == mock_val + + +def test_create_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_pipeline_job( + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].pipeline_job + mock_val = gca_pipeline_job.PipelineJob(name='name_value') + assert arg == mock_val + arg = args[0].pipeline_job_id + mock_val = 'pipeline_job_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_pipeline_job( + pipeline_service.CreatePipelineJobRequest(), + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.GetPipelineJobRequest, + dict, +]) +def test_get_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + ) + response = client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +def test_get_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + client.get_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + +@pytest.mark.asyncio +async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + template_uri='template_uri_value', + )) + response = await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + assert response.template_uri == 'template_uri_value' + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async_from_dict(): + await test_get_pipeline_job_async(request_type=dict) + + +def test_get_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + call.return_value = pipeline_job.PipelineJob() + client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) + await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_job.PipelineJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.ListPipelineJobsRequest, + dict, +]) +def test_list_pipeline_jobs(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_pipeline_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + client.list_pipeline_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_pipeline_jobs( + pipeline_service.ListPipelineJobsRequest(), + parent='parent_value', + ) + + +def test_list_pipeline_jobs_pager(transport_name: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_pipeline_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) + for i in results) +def test_list_pipeline_jobs_pages(transport_name: str = "grpc"): + client = PipelineServiceClient( + 
credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_pipeline_jobs(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pager(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_pipeline_jobs(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, pipeline_job.PipelineJob) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_pages(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + pipeline_service.DeletePipelineJobRequest, + dict, +]) +def test_delete_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_pipeline_job( + pipeline_service.DeletePipelineJobRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + pipeline_service.CancelPipelineJobRequest, + dict, +]) +def test_cancel_pipeline_job(request_type, transport: str = 'grpc'): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_cancel_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_cancel_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.cancel_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_pipeline_job( + pipeline_service.CancelPipelineJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PipelineServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PipelineServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+@pytest.mark.parametrize("transport_name", [
+ "grpc",
+])
+def test_transport_kind(transport_name):
+ transport = PipelineServiceClient.get_transport_class(transport_name)(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert transport.kind == transport_name
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ assert isinstance(
+ client.transport,
+ transports.PipelineServiceGrpcTransport,
+ )
+
+def test_pipeline_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.PipelineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json"
+ )
+
+
+def test_pipeline_service_base_transport():
+ # Instantiate the base transport.
+ with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport:
+ Transport.return_value = None
+ transport = transports.PipelineServiceTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ 'create_training_pipeline',
+ 'get_training_pipeline',
+ 'list_training_pipelines',
+ 'delete_training_pipeline',
+ 'cancel_training_pipeline',
+ 'create_pipeline_job',
+ 'get_pipeline_job',
+ 'list_pipeline_jobs',
+ 'delete_pipeline_job',
+ 'cancel_pipeline_job',
+ 'set_iam_policy',
+ 'get_iam_policy',
+ 'test_iam_permissions',
+ 'get_location',
+ 'list_locations',
+ 'get_operation',
+ 'wait_operation',
+ 'cancel_operation',
+ 'delete_operation',
+ 'list_operations',
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+ # Catch all for all remaining methods and properties
+ remainder = [
+ 'kind',
+ ]
+ for r in remainder:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, r)()
+
+
+def test_pipeline_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PipelineServiceTransport(
+ credentials_file="credentials.json",
+ quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with("credentials.json",
+ scopes=None,
+ default_scopes=(
+ 'https://www.googleapis.com/auth/cloud-platform',
+),
+ quota_project_id="octopus",
+ )
+
+
+def test_pipeline_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.PipelineServiceTransport()
+ adc.assert_called_once()
+
+
+def test_pipeline_service_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ PipelineServiceClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=(
+ 'https://www.googleapis.com/auth/cloud-platform',
+),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.PipelineServiceGrpcTransport,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_pipeline_service_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.PipelineServiceGrpcTransport, grpc_helpers),
+ (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async)
+ ],
+)
+def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_pipeline_service_host_no_port(transport_name): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_pipeline_service_host_with_port(transport_name): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_pipeline_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PipelineServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_pipeline_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.PipelineServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_pipeline_service_grpc_lro_client(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_pipeline_service_grpc_lro_async_client(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_artifact_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = PipelineServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_artifact_path(path) + assert expected == actual + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + actual = PipelineServiceClient.context_path(project, location, metadata_store, context) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = PipelineServiceClient.context_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_context_path(path) + assert expected == actual + +def test_custom_job_path(): + project = "oyster" + location = "nudibranch" + custom_job = "cuttlefish" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = PipelineServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "custom_job": "nautilus", + } + path = PipelineServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_endpoint_path(): + project = "scallop" + location = "abalone" + endpoint = "squid" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PipelineServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "clam", + "location": "whelk", + "endpoint": "octopus", + } + path = PipelineServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PipelineServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PipelineServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_model_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", + } + path = PipelineServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PipelineServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = PipelineServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = PipelineServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = PipelineServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PipelineServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = PipelineServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = PipelineServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = PipelineServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PipelineServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = PipelineServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PipelineServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        )
+        response = await client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+def test_set_iam_policy_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py new file mode 100644 index 0000000000..788b48d0c4 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -0,0 +1,3307 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api import httpbody_pb2 # type: ignore +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceClient +from google.cloud.aiplatform_v1beta1.services.prediction_service import transports +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), +]) +def test_prediction_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + 
(transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (PredictionServiceClient, "grpc"), + (PredictionServiceAsyncClient, "grpc_asyncio"), +]) +def test_prediction_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, PredictionServiceAsyncClient +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+    options = client_options.ClientOptions(
+        credentials_file="credentials.json"
+    )
+
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options, transport=transport_name)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file="credentials.json",
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+def test_prediction_service_client_client_options_from_dict():
+    with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport:
+        grpc_transport.return_value = None
+        client = PredictionServiceClient(
+            client_options={'api_endpoint': 'squid.clam.whelk'}
+        )
+        grpc_transport.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
+    (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", grpc_helpers),
+    (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
+])
+def test_prediction_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
+    # Check the case credentials file is provided.
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.PredictRequest, + dict, +]) +def test_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + model='model_value', + model_version_id='model_version_id_value', + model_display_name='model_display_name_value', + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + assert response.model == 'model_value' + assert response.model_version_id == 'model_version_id_value' + assert response.model_display_name == 'model_display_name_value' + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + client.predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + +@pytest.mark.asyncio +async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + model='model_value', + model_version_id='model_version_id_value', + model_display_name='model_display_name_value', + )) + response = await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + assert response.model == 'model_value' + assert response.model_version_id == 'model_version_id_value' + assert response.model_display_name == 'model_display_name_value' + + +@pytest.mark.asyncio +async def test_predict_async_from_dict(): + await test_predict_async(request_type=dict) + + +def test_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = prediction_service.PredictResponse() + client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.PredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + await client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.predict( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + + +def test_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.predict( + prediction_service.PredictRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + +@pytest.mark.asyncio +async def test_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.predict( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.RawPredictRequest, + dict, +]) +def test_raw_predict(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody( + content_type='content_type_value', + data=b'data_blob', + ) + response = client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == 'content_type_value' + assert response.data == b'data_blob' + + +def test_raw_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + client.raw_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + +@pytest.mark.asyncio +async def test_raw_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.RawPredictRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody( + content_type='content_type_value', + data=b'data_blob', + )) + response = await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.RawPredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == 'content_type_value' + assert response.data == b'data_blob' + + +@pytest.mark.asyncio +async def test_raw_predict_async_from_dict(): + await test_raw_predict_async(request_type=dict) + + +def test_raw_predict_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.RawPredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + call.return_value = httpbody_pb2.HttpBody() + client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_raw_predict_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = prediction_service.RawPredictRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) + await client.raw_predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_raw_predict_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.raw_predict( + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') + assert arg == mock_val + + +def test_raw_predict_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + +@pytest.mark.asyncio +async def test_raw_predict_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.raw_predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = httpbody_pb2.HttpBody() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.raw_predict( + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_raw_predict_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.raw_predict( + prediction_service.RawPredictRequest(), + endpoint='endpoint_value', + http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + prediction_service.ExplainRequest, + dict, +]) +def test_explain(request_type, transport: str = 'grpc'): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.ExplainResponse( + deployed_model_id='deployed_model_id_value', + ) + response = client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.ExplainResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +def test_explain_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + client.explain() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + +@pytest.mark.asyncio +async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse( + deployed_model_id='deployed_model_id_value', + )) + response = await client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ExplainRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.ExplainResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +@pytest.mark.asyncio +async def test_explain_async_from_dict(): + await test_explain_async(request_type=dict) + + +def test_explain_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = prediction_service.ExplainResponse() + client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_explain_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) + await client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint_value', + ) in kw['metadata'] + + +def test_explain_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.ExplainResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.explain( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + + +def test_explain_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.explain( + prediction_service.ExplainRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + +@pytest.mark.asyncio +async def test_explain_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = prediction_service.ExplainResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.explain( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = 'endpoint_value' + assert arg == mock_val + arg = args[0].instances + mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] + assert arg == mock_val + arg = args[0].parameters + mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) + from proto.marshal import Marshal + from proto.marshal.rules.struct import ValueRule + rule = ValueRule(marshal=Marshal(name="Test")) + mock_val = rule.to_python(mock_val) + assert arg == mock_val + arg = args[0].deployed_model_id + mock_val = 'deployed_model_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_explain_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.explain( + prediction_service.ExplainRequest(), + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = PredictionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'predict', + 'raw_predict', + 'explain', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + 
Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_prediction_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport() + adc.assert_called_once() + + +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +def test_prediction_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PredictionServiceGrpcTransport, grpc_helpers), + (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_prediction_service_host_no_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_prediction_service_host_with_port(transport_name): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_prediction_service_grpc_transport_channel(): + channel = 
grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PredictionServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_prediction_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PredictionServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport])
+def test_prediction_service_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ 
certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = 
"projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PredictionServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = PredictionServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PredictionServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = PredictionServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_model_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is 
concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):  # renamed: was test_delete_operation, which shadowed the sync test above
+ client = PredictionServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.DeleteOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ None
+ )
+ response = await client.delete_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+def test_delete_operation_field_headers():
+ client = PredictionServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value. 
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = None
+
+        response = client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # (Patch get_location — the method under test — not list_locations,
+    # otherwise the assert_called() below checks the wrong stub.)
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py new file mode 100644 index 0000000000..5b04cfb17e --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -0,0 +1,3925 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +import google.auth + + +def 
client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 
'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (SpecialistPoolServiceClient, "grpc"), + (SpecialistPoolServiceAsyncClient, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_specialist_pool_service_client_get_transport_class(): + transport = SpecialistPoolServiceClient.get_transport_class() + available_transports = [ + 
transports.SpecialistPoolServiceGrpcTransport, + ] + assert transport in available_transports + + transport = SpecialistPoolServiceClient.get_transport_class("grpc") + assert transport == transports.SpecialistPoolServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the 
endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, SpecialistPoolServiceAsyncClient +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", grpc_helpers), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_specialist_pool_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = SpecialistPoolServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", grpc_helpers), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_specialist_pool_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.CreateSpecialistPoolRequest, + dict, +]) +def test_create_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + client.create_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async_from_dict(): + await test_create_specialist_pool_async(request_type=dict) + + +def test_create_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + + +def test_create_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.GetSpecialistPoolRequest, + dict, +]) +def test_get_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + specialist_worker_emails=['specialist_worker_emails_value'], + ) + response = client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + assert response.specialist_worker_emails == ['specialist_worker_emails_value'] + + +def test_get_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + client.get_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + specialist_worker_emails=['specialist_worker_emails_value'], + )) + response = await client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + assert response.specialist_worker_emails == ['specialist_worker_emails_value'] + + +@pytest.mark.asyncio +async def test_get_specialist_pool_async_from_dict(): + await test_get_specialist_pool_async(request_type=dict) + + +def test_get_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.GetSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + call.return_value = specialist_pool.SpecialistPool() + client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.GetSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) + await client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.ListSpecialistPoolsRequest, + dict, +]) +def test_list_specialist_pools(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSpecialistPoolsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_specialist_pools_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + client.list_specialist_pools() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + +@pytest.mark.asyncio +async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_from_dict(): + await test_list_specialist_pools_async(request_type=dict) + + +def test_list_specialist_pools_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = specialist_pool_service.ListSpecialistPoolsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_specialist_pools_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.ListSpecialistPoolsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + await client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_specialist_pools_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_specialist_pools_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_specialist_pools( + specialist_pool_service.ListSpecialistPoolsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_specialist_pools_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_specialist_pools_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_specialist_pools( + specialist_pool_service.ListSpecialistPoolsRequest(), + parent='parent_value', + ) + + +def test_list_specialist_pools_pager(transport_name: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_specialist_pools(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, specialist_pool.SpecialistPool) + for 
i in results) +def test_list_specialist_pools_pages(transport_name: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + pages = list(client.list_specialist_pools(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_pager(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_specialist_pools(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, specialist_pool.SpecialistPool) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_specialist_pools_async_pages(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + next_page_token='abc', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[], + next_page_token='def', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + ], + next_page_token='ghi', + ), + specialist_pool_service.ListSpecialistPoolsResponse( + specialist_pools=[ + specialist_pool.SpecialistPool(), + specialist_pool.SpecialistPool(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_specialist_pools(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.DeleteSpecialistPoolRequest, + dict, +]) +def test_delete_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + client.delete_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async_from_dict(): + await test_delete_specialist_pool_async(request_type=dict) + + +def test_delete_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + specialist_pool_service.UpdateSpecialistPoolRequest, + dict, +]) +def test_update_specialist_pool(request_type, transport: str = 'grpc'): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + client.update_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + +@pytest.mark.asyncio +async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async_from_dict(): + await test_update_specialist_pool_async(request_type=dict) + + +def test_update_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = specialist_pool_service.UpdateSpecialistPoolRequest() + + request.specialist_pool.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'specialist_pool.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.UpdateSpecialistPoolRequest() + + request.specialist_pool.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'specialist_pool.name=name_value', + ) in kw['metadata'] + + +def test_update_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_specialist_pool( + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_specialist_pool( + specialist_pool_service.UpdateSpecialistPoolRequest(), + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_specialist_pool( + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].specialist_pool + mock_val = gca_specialist_pool.SpecialistPool(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_specialist_pool( + specialist_pool_service.UpdateSpecialistPoolRequest(), + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SpecialistPoolServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SpecialistPoolServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = SpecialistPoolServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.SpecialistPoolServiceGrpcTransport, + ) + +def test_specialist_pool_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_specialist_pool_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'create_specialist_pool', + 'get_specialist_pool', + 'list_specialist_pools', + 'delete_specialist_pool', + 'update_specialist_pool', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_specialist_pool_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_specialist_pool_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport() + adc.assert_called_once() + + +def test_specialist_pool_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +def test_specialist_pool_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_specialist_pool_service_host_no_port(transport_name): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_specialist_pool_service_host_with_port(transport_name): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_specialist_pool_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.SpecialistPoolServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_specialist_pool_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+    transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None  # identity check for None (PEP 8 / flake8 E711)
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport])
+def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source)
are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_specialist_pool_service_grpc_lro_client(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_service_grpc_lro_async_client(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_path(): + project = "squid" + location = "clam" + specialist_pool = "whelk" + expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) + assert expected == actual + + +def test_parse_specialist_pool_path(): + expected = { + "project": "octopus", + "location": "oyster", + "specialist_pool": "nudibranch", + } + path = SpecialistPoolServiceClient.specialist_pool_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = SpecialistPoolServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = SpecialistPoolServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = SpecialistPoolServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SpecialistPoolServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = SpecialistPoolServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = SpecialistPoolServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = SpecialistPoolServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpecialistPoolServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SpecialistPoolServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = SpecialistPoolServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = SpecialistPoolServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional 
in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc"):  # renamed: was shadowing the sync test_delete_operation
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = None
+
+        response = client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_delete_operation_from_dict_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc"):  # renamed: was shadowing the sync test_cancel_operation
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc"):  # renamed: was shadowing the sync test_wait_operation
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc"):  # renamed: was shadowing the sync test_get_operation
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc"):  # renamed: was shadowing the sync test_list_operations
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.ListLocationsResponse()
+ response = client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = locations_pb2.ListLocationsRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.ListLocationsResponse()
+ )
+ response = await client.list_locations(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = locations_pb2.GetLocationRequest()
+ request.name = "locations/abc"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ await client.get_location(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = locations_pb2.Location()
+
+ response = client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ locations_pb2.Location()
+ )
+ response = await client.get_location(
+ request={
+ "name": "locations/abc",
+ }
+ )
+ call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+ response = client.set_iam_policy(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+
+ assert response.version == 774
+
+ assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py new file mode 100644 index 0000000000..b93627e9ac --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -0,0 +1,10365 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types 
import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None + assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + 
+@pytest.mark.parametrize("client_class,transport_name", [ + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), +]) +def test_tensorboard_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TensorboardServiceGrpcTransport, "grpc"), + (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (TensorboardServiceClient, "grpc"), + (TensorboardServiceAsyncClient, "grpc_asyncio"), +]) +def test_tensorboard_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = 
client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_tensorboard_service_client_get_transport_class(): + transport = TensorboardServiceClient.get_transport_class() + available_transports = [ + transports.TensorboardServiceGrpcTransport, + ] + assert transport in available_transports + + transport = TensorboardServiceClient.get_transport_class("grpc") + assert transport == transports.TensorboardServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", 
"false"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + TensorboardServiceClient, TensorboardServiceAsyncClient +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +def test_tensorboard_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", grpc_helpers), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_tensorboard_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = TensorboardServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", grpc_helpers), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_tensorboard_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardRequest, + dict, +]) +def test_create_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + client.create_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_tensorboard_async_from_dict(): + await test_create_tensorboard_async(request_type=dict) + + +def test_create_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard( + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + + +def test_create_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard( + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tensorboard( + tensorboard_service.CreateTensorboardRequest(), + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardRequest, + dict, +]) +def test_get_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard( + name='name_value', + display_name='display_name_value', + description='description_value', + blob_storage_path_prefix='blob_storage_path_prefix_value', + run_count=989, + etag='etag_value', + ) + response = client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard.Tensorboard) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' + assert response.run_count == 989 + assert response.etag == 'etag_value' + + +def test_get_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + client.get_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard( + name='name_value', + display_name='display_name_value', + description='description_value', + blob_storage_path_prefix='blob_storage_path_prefix_value', + run_count=989, + etag='etag_value', + )) + response = await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard.Tensorboard) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' + assert response.run_count == 989 + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_async_from_dict(): + await test_get_tensorboard_async(request_type=dict) + + +def test_get_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + call.return_value = tensorboard.Tensorboard() + client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) + await client.get_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard.Tensorboard() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardRequest, + dict, +]) +def test_update_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + client.update_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_tensorboard_async_from_dict(): + await test_update_tensorboard_async(request_type=dict) + + +def test_update_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = gca_tensorboard.Tensorboard(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard( + tensorboard_service.UpdateTensorboardRequest(), + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardsRequest, + dict, +]) +def test_list_tensorboards(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboards_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + client.list_tensorboards() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_from_dict(): + await test_list_tensorboards_async(request_type=dict) + + +def test_list_tensorboards_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.ListTensorboardsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardsResponse() + client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboards_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) + await client.list_tensorboards(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboards_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboards( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboards_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboards( + tensorboard_service.ListTensorboardsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboards_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboards( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_tensorboards_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboards( + tensorboard_service.ListTensorboardsRequest(), + parent='parent_value', + ) + + +def test_list_tensorboards_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_tensorboards(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard.Tensorboard) + for i in results) +def test_list_tensorboards_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + 
credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboards(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_tensorboards_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboards(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard.Tensorboard) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboards_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboards(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardRequest, + dict, +]) +def test_delete_tensorboard(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + client.delete_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async_from_dict(): + await test_delete_tensorboard_async(request_type=dict) + + +def test_delete_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard( + tensorboard_service.DeleteTensorboardRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardExperimentRequest, + dict, +]) +def test_create_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_create_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + client.create_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_async_from_dict(): + await test_create_tensorboard_experiment_async(request_type=dict) + + +def test_create_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_experiment( + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_experiment_id + mock_val = 'tensorboard_experiment_id_value' + assert arg == mock_val + + +def test_create_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_experiment( + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_experiment_id + mock_val = 'tensorboard_experiment_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tensorboard_experiment( + tensorboard_service.CreateTensorboardExperimentRequest(), + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardExperimentRequest, + dict, +]) +def test_get_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_get_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + client.get_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_async_from_dict(): + await test_get_tensorboard_experiment_async(request_type=dict) + + +def test_get_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + call.return_value = tensorboard_experiment.TensorboardExperiment() + client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) + await client.get_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardExperimentRequest, + dict, +]) +def test_update_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_update_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + client.update_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + )) + response = await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async_from_dict(): + await test_update_tensorboard_experiment_async(request_type=dict) + + +def test_update_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardExperimentRequest() + + request.tensorboard_experiment.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.UpdateTensorboardExperimentRequest() + + request.tensorboard_experiment.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + await client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_experiment.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_experiment( + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_experiment + mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_tensorboard_experiment( + tensorboard_service.UpdateTensorboardExperimentRequest(), + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardExperimentsRequest, + dict, +]) +def test_list_tensorboard_experiments(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardExperimentsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_experiments_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + client.list_tensorboard_experiments() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_from_dict(): + await test_list_tensorboard_experiments_async(request_type=dict) + + +def test_list_tensorboard_experiments_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) + await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_experiments_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_experiments( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_experiments_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_tensorboard_experiments(
+            tensorboard_service.ListTensorboardExperimentsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_experiments),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ListTensorboardExperimentsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_tensorboard_experiments(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_tensorboard_experiments(
+            tensorboard_service.ListTensorboardExperimentsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_tensorboard_experiments_pager(transport_name: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_experiments),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_tensorboard_experiments(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
+                   for i in results)
+def test_list_tensorboard_experiments_pages(transport_name: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tensorboard_experiments(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pager():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_experiments),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardExperimentsResponse(
+                tensorboard_experiments=[
+                    tensorboard_experiment.TensorboardExperiment(),
+                    tensorboard_experiment.TensorboardExperiment(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tensorboard_experiments(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pages():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_experiments),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_experiments(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardExperimentRequest, + dict, +]) +def test_delete_tensorboard_experiment(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + client.delete_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async_from_dict(): + await test_delete_tensorboard_experiment_async(request_type=dict) + + +def test_delete_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_experiment( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_tensorboard_experiment( + tensorboard_service.DeleteTensorboardExperimentRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardRunRequest, + dict, +]) +def test_create_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_create_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.create_tensorboard_run),
+            '__call__') as call:
+        client.create_tensorboard_run()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_tensorboard_run),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        ))
+        response = await client.create_tensorboard_run(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_async_from_dict(): + await test_create_tensorboard_run_async(request_type=dict) + + +def test_create_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRunRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + await client.create_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_run( + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_run_id + mock_val = 'tensorboard_run_id_value' + assert arg == mock_val + + +def test_create_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_run( + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].tensorboard_run_id + mock_val = 'tensorboard_run_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchCreateTensorboardRunsRequest, + dict, +]) +def test_batch_create_tensorboard_runs(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse( + ) + response = client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse)
+
+
+def test_batch_create_tensorboard_runs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_create_tensorboard_runs),
+            '__call__') as call:
+        client.batch_create_tensorboard_runs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest()
+
+@pytest.mark.asyncio
+async def test_batch_create_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_create_tensorboard_runs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse(
+        ))
+        response = await client.batch_create_tensorboard_runs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_async_from_dict(): + await test_batch_create_tensorboard_runs_async(request_type=dict) + + +def test_batch_create_tensorboard_runs_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) + await client.batch_create_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_tensorboard_runs_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_create_tensorboard_runs( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_create_tensorboard_runs( + tensorboard_service.BatchCreateTensorboardRunsRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_create_tensorboard_runs( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_runs_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_create_tensorboard_runs( + tensorboard_service.BatchCreateTensorboardRunsRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardRunRequest, + dict, +]) +def test_get_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + client.get_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_async_from_dict(): + await test_get_tensorboard_run_async(request_type=dict) + + +def test_get_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + call.return_value = tensorboard_run.TensorboardRun() + client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) + await client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_tensorboard_run( + tensorboard_service.GetTensorboardRunRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard_run( + tensorboard_service.GetTensorboardRunRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardRunRequest, + dict, +]) +def test_update_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_update_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + client.update_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_async_from_dict(): + await test_update_tensorboard_run_async(request_type=dict) + + +def test_update_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_run + mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard_run( + tensorboard_service.UpdateTensorboardRunRequest(), + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardRunsRequest, + dict, +]) +def test_list_tensorboard_runs(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardRunsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_runs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + client.list_tensorboard_runs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardRunsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardRunsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_async_from_dict(): + await test_list_tensorboard_runs_async(request_type=dict) + + +def test_list_tensorboard_runs_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.ListTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardRunsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) + await client.list_tensorboard_runs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_runs_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_runs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_runs_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboard_runs( + tensorboard_service.ListTensorboardRunsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboard_runs_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_runs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardRunsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_runs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_tensorboard_runs(
+            tensorboard_service.ListTensorboardRunsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_tensorboard_runs_pager(transport_name: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_runs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_tensorboard_runs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, tensorboard_run.TensorboardRun)
+                   for i in results)
+def test_list_tensorboard_runs_pages(transport_name: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_runs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tensorboard_runs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async_pager():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_runs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardRunsResponse(
+                tensorboard_runs=[
+                    tensorboard_run.TensorboardRun(),
+                    tensorboard_run.TensorboardRun(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tensorboard_runs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, tensorboard_run.TensorboardRun)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async_pages():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tensorboard_runs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardRunsResponse( + tensorboard_runs=[ + tensorboard_run.TensorboardRun(), + tensorboard_run.TensorboardRun(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_runs(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardRunRequest, + dict, +]) +def test_delete_tensorboard_run(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + client.delete_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async_from_dict(): + await test_delete_tensorboard_run_async(request_type=dict) + + +def test_delete_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_run( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, + dict, +]) +def test_batch_create_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( + ) + response = client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) + + +def test_batch_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + client.batch_create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( + )) + response = await client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_async_from_dict(): + await test_batch_create_tensorboard_time_series_async(request_type=dict) + + +def test_batch_create_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) + await client.batch_create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_batch_create_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.batch_create_tensorboard_time_series( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] + assert arg == mock_val + + +def test_batch_create_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.batch_create_tensorboard_time_series( + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].requests + mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_create_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_create_tensorboard_time_series( + tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), + parent='parent_value', + requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.CreateTensorboardTimeSeriesRequest, + dict, +]) +def test_create_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + client.create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async_from_dict(): + await test_create_tensorboard_time_series_async(request_type=dict) + + +def test_create_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.CreateTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + await client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_time_series( + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + + +def test_create_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tensorboard_time_series( + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.GetTensorboardTimeSeriesRequest, + dict, +]) +def test_get_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_get_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + client.get_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async_from_dict(): + await test_get_tensorboard_time_series_async(request_type=dict) + + +def test_get_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.GetTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = tensorboard_service.GetTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) + await client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_tensorboard_time_series( + tensorboard_service.GetTensorboardTimeSeriesRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.UpdateTensorboardTimeSeriesRequest, + dict, +]) +def test_update_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_update_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + client.update_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + )) + response = await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async_from_dict(): + await test_update_tensorboard_time_series_async(request_type=dict) + + +def test_update_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=name_value', + ) in kw['metadata'] + + +def test_update_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_tensorboard_time_series( + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_tensorboard_time_series( + tensorboard_service.UpdateTensorboardTimeSeriesRequest(), + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ListTensorboardTimeSeriesRequest, + dict, +]) +def test_list_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTensorboardTimeSeriesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + client.list_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_from_dict(): + await test_list_tensorboard_time_series_async(request_type=dict) + + +def test_list_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) + await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_time_series( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tensorboard_time_series( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent='parent_value', + ) + + +def test_list_tensorboard_time_series_pager(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_tensorboard_time_series(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) + for i in results) +def test_list_tensorboard_time_series_pages(transport_name: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tensorboard_time_series(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_pager(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tensorboard_time_series(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_async_pages(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardTimeSeriesResponse( + tensorboard_time_series=[ + tensorboard_time_series.TensorboardTimeSeries(), + tensorboard_time_series.TensorboardTimeSeries(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_time_series(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.DeleteTensorboardTimeSeriesRequest, + dict, +]) +def test_delete_tensorboard_time_series(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + client.delete_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_async_from_dict(): + await test_delete_tensorboard_time_series_async(request_type=dict) + + +def test_delete_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_tensorboard_time_series( + tensorboard_service.DeleteTensorboardTimeSeriesRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_tensorboard_time_series( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_tensorboard_time_series_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_tensorboard_time_series( + tensorboard_service.DeleteTensorboardTimeSeriesRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, + dict, +]) +def test_batch_read_tensorboard_time_series_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( + ) + response = client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) + + +def test_batch_read_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + client.batch_read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( + )) + response = await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_async_from_dict(): + await test_batch_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_batch_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = 'tensorboard_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard=tensorboard_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() + + request.tensorboard = 'tensorboard_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) + await client.batch_read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard=tensorboard_value', + ) in kw['metadata'] + + +def test_batch_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_read_tensorboard_time_series_data( + tensorboard='tensorboard_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = 'tensorboard_value' + assert arg == mock_val + + +def test_batch_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard='tensorboard_value', + ) + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_read_tensorboard_time_series_data( + tensorboard='tensorboard_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard + mock_val = 'tensorboard_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_batch_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.batch_read_tensorboard_time_series_data( + tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), + tensorboard='tensorboard_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ReadTensorboardTimeSeriesDataRequest, + dict, +]) +def test_read_tensorboard_time_series_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse( + ) + response = client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) + + +def test_read_tensorboard_time_series_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + client.read_tensorboard_time_series_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse( + )) + response = await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_async_from_dict(): + await test_read_tensorboard_time_series_data_async(request_type=dict) + + +def test_read_tensorboard_time_series_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() + + request.tensorboard_time_series = 'tensorboard_time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) + await client.read_tensorboard_time_series_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series=tensorboard_time_series_value', + ) in kw['metadata'] + + +def test_read_tensorboard_time_series_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + + +def test_read_tensorboard_time_series_data_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_time_series_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_tensorboard_time_series_data( + tensorboard_time_series='tensorboard_time_series_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].tensorboard_time_series + mock_val = 'tensorboard_time_series_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_tensorboard_time_series_data_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_tensorboard_time_series_data( + tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), + tensorboard_time_series='tensorboard_time_series_value', + ) + + +@pytest.mark.parametrize("request_type", [ + tensorboard_service.ReadTensorboardBlobDataRequest, + dict, +]) +def test_read_tensorboard_blob_data(request_type, transport: str = 'grpc'): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + response = client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +def test_read_tensorboard_blob_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + client.read_tensorboard_blob_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) + response = await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_async_from_dict(): + await test_read_tensorboard_blob_data_async(request_type=dict) + + +def test_read_tensorboard_blob_data_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + + request.time_series = 'time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'time_series=time_series_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_tensorboard_blob_data_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ReadTensorboardBlobDataRequest() + + request.time_series = 'time_series_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) + await client.read_tensorboard_blob_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'time_series=time_series_value', + ) in kw['metadata'] + + +def test_read_tensorboard_blob_data_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_tensorboard_blob_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
def test_read_tensorboard_blob_data_flattened_error():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.read_tensorboard_blob_data(
            tensorboard_service.ReadTensorboardBlobDataRequest(),
            time_series='time_series_value',
        )


@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_flattened_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.read_tensorboard_blob_data),
            '__call__') as call:
        # Designate an appropriate (awaitable, server-streaming) return
        # value for the call.
        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.read_tensorboard_blob_data(
            time_series='time_series_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].time_series
        mock_val = 'time_series_value'
        assert arg == mock_val


@pytest.mark.asyncio
async def test_read_tensorboard_blob_data_flattened_error_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.read_tensorboard_blob_data(
            tensorboard_service.ReadTensorboardBlobDataRequest(),
            time_series='time_series_value',
        )


@pytest.mark.parametrize("request_type", [
    tensorboard_service.WriteTensorboardExperimentDataRequest,
    dict,
])
def test_write_tensorboard_experiment_data(request_type, transport: str = 'grpc'):
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse(
        )
        response = client.write_tensorboard_experiment_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse)


def test_write_tensorboard_experiment_data_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        client.write_tensorboard_experiment_data()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()


@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest):
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse(
        ))
        response = await client.write_tensorboard_experiment_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse)


@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_async_from_dict():
    await test_write_tensorboard_experiment_data_async(request_type=dict)


def test_write_tensorboard_experiment_data_field_headers():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.WriteTensorboardExperimentDataRequest()

    request.tensorboard_experiment = 'tensorboard_experiment_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse()
        client.write_tensorboard_experiment_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_experiment=tensorboard_experiment_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_field_headers_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.WriteTensorboardExperimentDataRequest()

    request.tensorboard_experiment = 'tensorboard_experiment_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse())
        await client.write_tensorboard_experiment_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_experiment=tensorboard_experiment_value',
    ) in kw['metadata']


def test_write_tensorboard_experiment_data_flattened():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.write_tensorboard_experiment_data(
            tensorboard_experiment='tensorboard_experiment_value',
            write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_experiment
        mock_val = 'tensorboard_experiment_value'
        assert arg == mock_val
        arg = args[0].write_run_data_requests
        mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')]
        assert arg == mock_val


def test_write_tensorboard_experiment_data_flattened_error():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.write_tensorboard_experiment_data(
            tensorboard_service.WriteTensorboardExperimentDataRequest(),
            tensorboard_experiment='tensorboard_experiment_value',
            write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')],
        )


@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_flattened_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_experiment_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.write_tensorboard_experiment_data(
            tensorboard_experiment='tensorboard_experiment_value',
            write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_experiment
        mock_val = 'tensorboard_experiment_value'
        assert arg == mock_val
        arg = args[0].write_run_data_requests
        mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')]
        assert arg == mock_val


@pytest.mark.asyncio
async def test_write_tensorboard_experiment_data_flattened_error_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.write_tensorboard_experiment_data(
            tensorboard_service.WriteTensorboardExperimentDataRequest(),
            tensorboard_experiment='tensorboard_experiment_value',
            write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')],
        )


@pytest.mark.parametrize("request_type", [
    tensorboard_service.WriteTensorboardRunDataRequest,
    dict,
])
def test_write_tensorboard_run_data(request_type, transport: str = 'grpc'):
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse(
        )
        response = client.write_tensorboard_run_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)


def test_write_tensorboard_run_data_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        client.write_tensorboard_run_data()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()


@pytest.mark.asyncio
async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest):
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse(
        ))
        response = await client.write_tensorboard_run_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)


@pytest.mark.asyncio
async def test_write_tensorboard_run_data_async_from_dict():
    await test_write_tensorboard_run_data_async(request_type=dict)


def test_write_tensorboard_run_data_field_headers():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.WriteTensorboardRunDataRequest()

    request.tensorboard_run = 'tensorboard_run_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
        client.write_tensorboard_run_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_run=tensorboard_run_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_write_tensorboard_run_data_field_headers_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.WriteTensorboardRunDataRequest()

    request.tensorboard_run = 'tensorboard_run_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse())
        await client.write_tensorboard_run_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_run=tensorboard_run_value',
    ) in kw['metadata']


def test_write_tensorboard_run_data_flattened():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.write_tensorboard_run_data(
            tensorboard_run='tensorboard_run_value',
            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_run
        mock_val = 'tensorboard_run_value'
        assert arg == mock_val
        arg = args[0].time_series_data
        mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')]
        assert arg == mock_val


def test_write_tensorboard_run_data_flattened_error():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.write_tensorboard_run_data(
            tensorboard_service.WriteTensorboardRunDataRequest(),
            tensorboard_run='tensorboard_run_value',
            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
        )


@pytest.mark.asyncio
async def test_write_tensorboard_run_data_flattened_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.write_tensorboard_run_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.write_tensorboard_run_data(
            tensorboard_run='tensorboard_run_value',
            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_run
        mock_val = 'tensorboard_run_value'
        assert arg == mock_val
        arg = args[0].time_series_data
        mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')]
        assert arg == mock_val


@pytest.mark.asyncio
async def test_write_tensorboard_run_data_flattened_error_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.write_tensorboard_run_data(
            tensorboard_service.WriteTensorboardRunDataRequest(),
            tensorboard_run='tensorboard_run_value',
            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
        )


@pytest.mark.parametrize("request_type", [
    tensorboard_service.ExportTensorboardTimeSeriesDataRequest,
    dict,
])
def test_export_tensorboard_time_series_data(request_type, transport: str = 'grpc'):
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
            next_page_token='next_page_token_value',
        )
        response = client.export_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager)
    assert response.next_page_token == 'next_page_token_value'


def test_export_tensorboard_time_series_data_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        client.export_tensorboard_time_series_data()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest):
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.export_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager)
    assert response.next_page_token == 'next_page_token_value'


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_from_dict():
    await test_export_tensorboard_time_series_data_async(request_type=dict)


def test_export_tensorboard_time_series_data_field_headers():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()

    request.tensorboard_time_series = 'tensorboard_time_series_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
        client.export_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_time_series=tensorboard_time_series_value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_field_headers_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()

    request.tensorboard_time_series = 'tensorboard_time_series_value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
        await client.export_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'tensorboard_time_series=tensorboard_time_series_value',
    ) in kw['metadata']


def test_export_tensorboard_time_series_data_flattened():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.export_tensorboard_time_series_data(
            tensorboard_time_series='tensorboard_time_series_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_time_series
        mock_val = 'tensorboard_time_series_value'
        assert arg == mock_val


def test_export_tensorboard_time_series_data_flattened_error():
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.export_tensorboard_time_series_data(
            tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
            tensorboard_time_series='tensorboard_time_series_value',
        )


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_flattened_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.export_tensorboard_time_series_data(
            tensorboard_time_series='tensorboard_time_series_value',
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tensorboard_time_series
        mock_val = 'tensorboard_time_series_value'
        assert arg == mock_val


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_flattened_error_async():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.export_tensorboard_time_series_data(
            tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
            tensorboard_time_series='tensorboard_time_series_value',
        )


def test_export_tensorboard_time_series_data_pager(transport_name: str = "grpc"):
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='abc',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[],
                next_page_token='def',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='ghi',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('tensorboard_time_series', ''),
            )),
        )
        pager = client.export_tensorboard_time_series_data(request={})

        assert pager._metadata == metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
                   for i in results)


def test_export_tensorboard_time_series_data_pages(transport_name: str = "grpc"):
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='abc',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[],
                next_page_token='def',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='ghi',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.export_tensorboard_time_series_data(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_pager():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='abc',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[],
                next_page_token='def',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='ghi',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.export_tensorboard_time_series_data(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
                   for i in responses)


@pytest.mark.asyncio
async def test_export_tensorboard_time_series_data_async_pages():
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.export_tensorboard_time_series_data),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='abc',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[],
                next_page_token='def',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
                next_page_token='ghi',
            ),
            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
                time_series_data_points=[
                    tensorboard_data.TimeSeriesDataPoint(),
                    tensorboard_data.TimeSeriesDataPoint(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages:  # pragma: no branch
            pages.append(page_)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_credentials_transport_error():
    # It is an error to provide credentials and a transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TensorboardServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TensorboardServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide an api_key and a transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = TensorboardServiceClient(
            client_options=options,
            transport=transport,
        )

    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = TensorboardServiceClient(
            client_options=options,
            credentials=ga_credentials.AnonymousCredentials()
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TensorboardServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )


def test_transport_instance():
    # A client may be instantiated with a custom transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = TensorboardServiceClient(transport=transport)
    assert client.transport is transport


def test_transport_get_channel():
    # A client may be instantiated with a custom transport instance.
    transport = transports.TensorboardServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

    transport = transports.TensorboardServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel


@pytest.mark.parametrize("transport_class", [
    transports.TensorboardServiceGrpcTransport,
    transports.TensorboardServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()


@pytest.mark.parametrize("transport_name", [
    "grpc",
])
def test_transport_kind(transport_name):
    transport = TensorboardServiceClient.get_transport_class(transport_name)(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert transport.kind == transport_name


def test_transport_grpc_default():
    # A client should use the gRPC transport by default.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.TensorboardServiceGrpcTransport,
    )


def test_tensorboard_service_base_transport_error():
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.TensorboardServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )
+ methods = ( + 'create_tensorboard', + 'get_tensorboard', + 'update_tensorboard', + 'list_tensorboards', + 'delete_tensorboard', + 'create_tensorboard_experiment', + 'get_tensorboard_experiment', + 'update_tensorboard_experiment', + 'list_tensorboard_experiments', + 'delete_tensorboard_experiment', + 'create_tensorboard_run', + 'batch_create_tensorboard_runs', + 'get_tensorboard_run', + 'update_tensorboard_run', + 'list_tensorboard_runs', + 'delete_tensorboard_run', + 'batch_create_tensorboard_time_series', + 'create_tensorboard_time_series', + 'get_tensorboard_time_series', + 'update_tensorboard_time_series', + 'list_tensorboard_time_series', + 'delete_tensorboard_time_series', + 'batch_read_tensorboard_time_series_data', + 'read_tensorboard_time_series_data', + 'read_tensorboard_blob_data', + 'write_tensorboard_experiment_data', + 'write_tensorboard_run_data', + 'export_tensorboard_time_series_data', + 'set_iam_policy', + 'get_iam_policy', + 'test_iam_permissions', + 'get_location', + 'list_locations', + 'get_operation', + 'wait_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_tensorboard_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, 
mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_tensorboard_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TensorboardServiceTransport() + adc.assert_called_once() + + +def test_tensorboard_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TensorboardServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.TensorboardServiceGrpcTransport, + transports.TensorboardServiceGrpcAsyncIOTransport, + ], +) +def test_tensorboard_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.TensorboardServiceGrpcTransport, grpc_helpers), + (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_tensorboard_service_host_no_port(transport_name): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_tensorboard_service_host_with_port(transport_name): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_tensorboard_service_grpc_transport_channel(): + 
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TensorboardServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_tensorboard_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.TensorboardServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + 
grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) +def test_tensorboard_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_tensorboard_service_grpc_lro_client(): + client = TensorboardServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_tensorboard_service_grpc_lro_async_client(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_tensorboard_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "octopus", + "location": "oyster", + "tensorboard": "nudibranch", + } + path = TensorboardServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_path(path) + assert expected == actual + +def test_tensorboard_experiment_path(): + project = "cuttlefish" + location = "mussel" + tensorboard = "winkle" + experiment = "nautilus" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) + actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment) + assert expected == actual + + +def test_parse_tensorboard_experiment_path(): + expected = { + "project": "scallop", + "location": "abalone", + "tensorboard": "squid", + "experiment": "clam", + } + path = TensorboardServiceClient.tensorboard_experiment_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path) + assert expected == actual + +def test_tensorboard_run_path(): + project = "whelk" + location = "octopus" + tensorboard = "oyster" + experiment = "nudibranch" + run = "cuttlefish" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) + actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run) + assert expected == actual + + +def test_parse_tensorboard_run_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + "experiment": "scallop", + "run": "abalone", + } + path = TensorboardServiceClient.tensorboard_run_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_tensorboard_run_path(path) + assert expected == actual + +def test_tensorboard_time_series_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + experiment = "octopus" + run = "oyster" + time_series = "nudibranch" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) + assert expected == actual + + +def test_parse_tensorboard_time_series_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "tensorboard": "winkle", + "experiment": "nautilus", + "run": "scallop", + "time_series": "abalone", + } + path = TensorboardServiceClient.tensorboard_time_series_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TensorboardServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TensorboardServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TensorboardServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TensorboardServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TensorboardServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TensorboardServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TensorboardServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TensorboardServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TensorboardServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TensorboardServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = TensorboardServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the 
 runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value.
+ request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+# NOTE: renamed from `test_wait_operation` and switched to the
+# "grpc_asyncio" transport: the async variant previously reused the sync
+# test's name, so pytest collected only this one and the sync test above
+# silently never ran.
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.Operation)
+# NOTE: renamed from `test_get_operation` and switched to the
+# "grpc_asyncio" transport: the async variant previously reused the sync
+# test's name, so pytest collected only this one and the sync test above
+# silently never ran.
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_get_operation_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.GetOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.get_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_operations(transport: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+# NOTE: renamed from `test_list_operations` and switched to the
+# "grpc_asyncio" transport: the async variant previously reused the sync
+# test's name, so pytest collected only this one and the sync test above
+# silently never ran.
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + +def test_list_operations_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+# NOTE: renamed from `test_list_locations` and switched to the
+# "grpc_asyncio" transport: the async variant previously reused the sync
+# test's name, so pytest collected only this one and the sync test above
+# silently never ran.
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Fix: patch `get_location` (the RPC under test) rather than
+    # `list_locations`; with the wrong stub mocked, `client.get_location`
+    # would hit the real, unmocked stub and `call.assert_called()` would
+    # fail.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Fix: patch `get_location` (the RPC under test) rather than
+    # `list_locations`, matching the sync variant's corrected target.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        # Use the resource-style name for consistency with the sync
+        # `test_get_location_from_dict` test.
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py new file mode 100644 index 0000000000..914407d71b --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -0,0 +1,6176 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.services.vizier_service import transports +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# 
If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VizierServiceClient._get_default_mtls_endpoint(None) is None + assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), +]) +def test_vizier_service_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.VizierServiceGrpcTransport, "grpc"), + 
(transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (VizierServiceClient, "grpc"), + (VizierServiceAsyncClient, "grpc_asyncio"), +]) +def test_vizier_service_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + + +def test_vizier_service_client_get_transport_class(): + transport = VizierServiceClient.get_transport_class() + available_transports = [ + transports.VizierServiceGrpcTransport, + ] + assert transport in available_transports + + transport = VizierServiceClient.get_transport_class("grpc") + assert transport == transports.VizierServiceGrpcTransport + + 
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +def test_vizier_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class", [ + VizierServiceClient, VizierServiceAsyncClient +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +def test_vizier_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", grpc_helpers), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +def test_vizier_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = VizierServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", grpc_helpers), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_vizier_service_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CreateStudyRequest, + dict, +]) +def test_create_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study( + name='name_value', + display_name='display_name_value', + state=gca_study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_create_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + client.create_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + +@pytest.mark.asyncio +async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study( + name='name_value', + display_name='display_name_value', + state=gca_study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_create_study_async_from_dict(): + await test_create_study_async(request_type=dict) + + +def test_create_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + call.return_value = gca_study.Study() + client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) + await client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_study( + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].study + mock_val = gca_study.Study(name='name_value') + assert arg == mock_val + + +def test_create_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_study( + vizier_service.CreateStudyRequest(), + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_study( + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].study + mock_val = gca_study.Study(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_study( + vizier_service.CreateStudyRequest(), + parent='parent_value', + study=gca_study.Study(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.GetStudyRequest, + dict, +]) +def test_get_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_get_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + client.get_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + +@pytest.mark.asyncio +async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_get_study_async_from_dict(): + await test_get_study_async(request_type=dict) + + +def test_get_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vizier_service.GetStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + call.return_value = study.Study() + client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + await client.get_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_study( + vizier_service.GetStudyRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_study( + vizier_service.GetStudyRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListStudiesRequest, + dict, +]) +def test_list_studies(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListStudiesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListStudiesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_studies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + client.list_studies() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + +@pytest.mark.asyncio +async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListStudiesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListStudiesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_studies_async_from_dict(): + await test_list_studies_async(request_type=dict) + + +def test_list_studies_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListStudiesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + call.return_value = vizier_service.ListStudiesResponse() + client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_studies_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListStudiesRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) + await client.list_studies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_studies_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = vizier_service.ListStudiesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_studies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_studies_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_studies( + vizier_service.ListStudiesRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_studies_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_studies), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListStudiesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_studies( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    arg = args[0].parent
+    mock_val = 'parent_value'
+    assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_studies_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_studies(
+            vizier_service.ListStudiesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_studies_pager(transport_name: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; passing the class is a latent bug
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_studies),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                    study.Study(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_studies(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, study.Study)
+                   for i in results)
+def test_list_studies_pages(transport_name: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; passing the class is a latent bug
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_studies),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                    study.Study(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_studies(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_studies_async_pager():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; passing the class is a latent bug
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_studies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                    study.Study(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListStudiesResponse(
+                studies=[
+                    study.Study(),
+                    study.Study(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_studies(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, study.Study)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_studies_async_pages():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),  # instantiate; passing the class is a latent bug
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_studies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_studies(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + vizier_service.DeleteStudyRequest, + dict, +]) +def test_delete_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + client.delete_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + +@pytest.mark.asyncio +async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_study_async_from_dict(): + await test_delete_study_async(request_type=dict) + + +def test_delete_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = None + client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_study( + vizier_service.DeleteStudyRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_study( + vizier_service.DeleteStudyRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.LookupStudyRequest, + dict, +]) +def test_lookup_study(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_lookup_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + client.lookup_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + +@pytest.mark.asyncio +async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( + name='name_value', + display_name='display_name_value', + state=study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + )) + response = await client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.LookupStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +@pytest.mark.asyncio +async def test_lookup_study_async_from_dict(): + await test_lookup_study_async(request_type=dict) + + +def test_lookup_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = vizier_service.LookupStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + call.return_value = study.Study() + client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_lookup_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.LookupStudyRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + await client.lookup_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_lookup_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Study() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.lookup_study( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_lookup_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.lookup_study( + vizier_service.LookupStudyRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_lookup_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.lookup_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Study() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.lookup_study( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_lookup_study_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.lookup_study( + vizier_service.LookupStudyRequest(), + parent='parent_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.SuggestTrialsRequest, + dict, +]) +def test_suggest_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_suggest_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + client.suggest_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + +@pytest.mark.asyncio +async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_suggest_trials_async_from_dict(): + await test_suggest_trials_async(request_type=dict) + + +def test_suggest_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_suggest_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CreateTrialRequest, + dict, +]) +def test_create_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_create_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + client.create_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + +@pytest.mark.asyncio +async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_create_trial_async_from_dict(): + await test_create_trial_async(request_type=dict) + + +def test_create_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = study.Trial() + client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_trial( + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].trial + mock_val = study.Trial(name='name_value') + assert arg == mock_val + + +def test_create_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_trial( + vizier_service.CreateTrialRequest(), + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + +@pytest.mark.asyncio +async def test_create_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_trial( + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].trial + mock_val = study.Trial(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_trial( + vizier_service.CreateTrialRequest(), + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.GetTrialRequest, + dict, +]) +def test_get_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_get_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + client.get_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + +@pytest.mark.asyncio +async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.GetTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_get_trial_async_from_dict(): + await test_get_trial_async(request_type=dict) + + +def test_get_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + call.return_value = study.Trial() + client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.GetTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.get_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_trial( + vizier_service.GetTrialRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_get_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_get_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_trial( + vizier_service.GetTrialRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListTrialsRequest, + dict, +]) +def test_list_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListTrialsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTrialsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + client.list_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + +@pytest.mark.asyncio +async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListTrialsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTrialsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_trials_async_from_dict(): + await test_list_trials_async(request_type=dict) + + +def test_list_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + call.return_value = vizier_service.ListTrialsResponse() + client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) + await client.list_trials(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_trials_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListTrialsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_trials_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_trials( + vizier_service.ListTrialsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_trials_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = vizier_service.ListTrialsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_trials_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_trials( + vizier_service.ListTrialsRequest(), + parent='parent_value', + ) + + +def test_list_trials_pager(transport_name: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_trials(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, study.Trial) + for i in results) +def test_list_trials_pages(transport_name: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + pages = list(client.list_trials(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_trials_async_pager(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_trials), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_trials(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, study.Trial) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_trials_async_pages(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_trials), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_trials(request={})).pages: # pragma: no branch + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + vizier_service.AddTrialMeasurementRequest, + dict, +]) +def test_add_trial_measurement(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_add_trial_measurement_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + client.add_trial_measurement() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + +@pytest.mark.asyncio +async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_add_trial_measurement_async_from_dict(): + await test_add_trial_measurement_async(request_type=dict) + + +def test_add_trial_measurement_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.AddTrialMeasurementRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + call.return_value = study.Trial() + client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_trial_measurement_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.AddTrialMeasurementRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CompleteTrialRequest, + dict, +]) +def test_complete_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_complete_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + client.complete_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + +@pytest.mark.asyncio +async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CompleteTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_complete_trial_async_from_dict(): + await test_complete_trial_async(request_type=dict) + + +def test_complete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = study.Trial() + client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_complete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.DeleteTrialRequest, + dict, +]) +def test_delete_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + client.delete_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + +@pytest.mark.asyncio +async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_delete_trial_async_from_dict(): + await test_delete_trial_async(request_type=dict) + + +def test_delete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = None + client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_trial( + vizier_service.DeleteTrialRequest(), + name='name_value', + ) + +@pytest.mark.asyncio +async def test_delete_trial_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_trial( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_trial_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_trial( + vizier_service.DeleteTrialRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + vizier_service.CheckTrialEarlyStoppingStateRequest, + dict, +]) +def test_check_trial_early_stopping_state(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_check_trial_early_stopping_state_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + client.check_trial_early_stopping_state() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async_from_dict(): + await test_check_trial_early_stopping_state_async(request_type=dict) + + +def test_check_trial_early_stopping_state_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.StopTrialRequest, + dict, +]) +def test_stop_trial(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_stop_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + client.stop_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + +@pytest.mark.asyncio +async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + )) + response = await client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_stop_trial_async_from_dict(): + await test_stop_trial_async(request_type=dict) + + +def test_stop_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.StopTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + call.return_value = study.Trial() + client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_stop_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.StopTrialRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + vizier_service.ListOptimalTrialsRequest, + dict, +]) +def test_list_optimal_trials(request_type, transport: str = 'grpc'): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse( + ) + response = client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, vizier_service.ListOptimalTrialsResponse) + + +def test_list_optimal_trials_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + client.list_optimal_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + +@pytest.mark.asyncio +async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse( + )) + response = await client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.ListOptimalTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, vizier_service.ListOptimalTrialsResponse) + + +@pytest.mark.asyncio +async def test_list_optimal_trials_async_from_dict(): + await test_list_optimal_trials_async(request_type=dict) + + +def test_list_optimal_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vizier_service.ListOptimalTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + call.return_value = vizier_service.ListOptimalTrialsResponse() + client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_optimal_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.ListOptimalTrialsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) + await client.list_optimal_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_optimal_trials_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_optimal_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_optimal_trials_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_optimal_trials( + vizier_service.ListOptimalTrialsRequest(), + parent='parent_value', + ) + +@pytest.mark.asyncio +async def test_list_optimal_trials_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_optimal_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = vizier_service.ListOptimalTrialsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_optimal_trials( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_optimal_trials_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_optimal_trials( + vizier_service.ListOptimalTrialsRequest(), + parent='parent_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VizierServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.VizierServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", +]) +def test_transport_kind(transport_name): + transport = VizierServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert isinstance(
+        client.transport,
+        transports.VizierServiceGrpcTransport,
+    )
+
+def test_vizier_service_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.VizierServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_vizier_service_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.VizierServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'create_study',
+        'get_study',
+        'list_studies',
+        'delete_study',
+        'lookup_study',
+        'suggest_trials',
+        'create_trial',
+        'get_trial',
+        'list_trials',
+        'add_trial_measurement',
+        'complete_trial',
+        'delete_trial',
+        'check_trial_early_stopping_state',
+        'stop_trial',
+        'list_optimal_trials',
+        'set_iam_policy',
+        'get_iam_policy',
+        'test_iam_permissions',
+        'get_location',
+        'list_locations',
+        'get_operation',
+        'wait_operation',
+        'cancel_operation',
+        'delete_operation',
+        'list_operations',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+    # Catch all for all remaining methods and properties
+    remainder = [
+        'kind',
+    ]
+    for r in remainder:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, r)()
+
+
+def test_vizier_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.VizierServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id="octopus",
+        )
+
+
+def test_vizier_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.VizierServiceTransport()
+        adc.assert_called_once()
+
+
+def test_vizier_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VizierServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, + ], +) +def test_vizier_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.VizierServiceGrpcTransport, grpc_helpers), + (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_vizier_service_host_no_port(transport_name): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:443' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", +]) +def test_vizier_service_host_with_port(transport_name): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'aiplatform.googleapis.com:8000' + ) + +def test_vizier_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VizierServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_vizier_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.VizierServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from 
grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_vizier_service_grpc_lro_client(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_vizier_service_grpc_lro_async_client(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_custom_job_path(): + project = "squid" + location = "clam" + custom_job = "whelk" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = VizierServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "custom_job": "nudibranch", + } + path = VizierServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_study_path(): + project = "cuttlefish" + location = "mussel" + study = "winkle" + expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) + actual = VizierServiceClient.study_path(project, location, study) + assert expected == actual + + +def test_parse_study_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "study": "abalone", + } + path = VizierServiceClient.study_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_study_path(path) + assert expected == actual + +def test_trial_path(): + project = "squid" + location = "clam" + study = "whelk" + trial = "octopus" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = VizierServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "study": "cuttlefish", + "trial": "mussel", + } + path = VizierServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = VizierServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = VizierServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = VizierServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = VizierServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = VizierServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = VizierServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = VizierServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = VizierServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = VizierServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = VizierServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VizierServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = VizierServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_delete_operation_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.DeleteOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+def test_delete_operation_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.DeleteOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_operation), "__call__") as call:
+        call.return_value = None
+
+        client.delete_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.delete_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_cancel_operation(transport: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.CancelOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            None
+        )
+        response = await client.cancel_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + +def test_wait_operation(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+def test_wait_operation_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_wait_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_operation(transport: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.get_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.GetOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.ListOperationsResponse()
+        response = client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_operations_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_list_locations_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_get_location_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + +def test_set_iam_policy_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + +def test_set_iam_policy_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + +def test_get_iam_policy(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy() + ) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + +def test_test_iam_permissions(transport: str = "grpc"): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'grpc', + ] + for transport in transports: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..231bc12501 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#